Compacted the virtual-register numbers in the test.
diff --git a/llvm/test/CodeGen/AMDGPU/regpressure-mitigation-with-subreg-reload.mir b/llvm/test/CodeGen/AMDGPU/regpressure-mitigation-with-subreg-reload.mir
index 4328923..a0993d7 100644
--- a/llvm/test/CodeGen/AMDGPU/regpressure-mitigation-with-subreg-reload.mir
+++ b/llvm/test/CodeGen/AMDGPU/regpressure-mitigation-with-subreg-reload.mir
@@ -16,22 +16,22 @@
   frameOffsetReg:  '$sgpr33'
   stackPtrOffsetReg: '$sgpr32'
 body:             |
-  bb.0.entry:
+  bb.0:
     liveins: $vgpr1, $sgpr0_sgpr1, $sgpr2_sgpr3
 
-    %8:vgpr_32 = COPY $vgpr1
-    %12:vreg_64 = COPY killed renamable $sgpr0_sgpr1
-    %11:vreg_128 = FLAT_LOAD_DWORDX4 %12, 0, 0, implicit $exec, implicit $flat_scr
-    undef early-clobber %49.sub0_sub1:vreg_128, $sgpr_null = V_MAD_U64_U32_gfx11_e64 %11.sub0, 42, 0, 0, implicit $exec
-    undef %50.sub0:vreg_64 = COPY %49.sub1
-    early-clobber %49.sub1_sub2:vreg_128, $sgpr_null = V_MAD_U64_U32_gfx11_e64 %11.sub1, 42, %50, 0, implicit $exec
-    early-clobber %15:vreg_64, $sgpr_null = V_MAD_U64_U32_gfx11_e64 %11.sub2, 42, 0, 0, implicit $exec
-    undef %52.sub0:vreg_64 = COPY %15.sub1
-    early-clobber %27:vreg_64, $sgpr_null = V_MAD_U64_U32_gfx11_e64 %11.sub3, 42, %52, 0, implicit $exec
-    %49.sub2:vreg_128 = COPY %15.sub0
-    %49.sub3:vreg_128 = COPY %27.sub0
-    $vgpr31 = COPY %8
+    %1:vgpr_32 = COPY $vgpr1
+    %2:vreg_64 = COPY killed renamable $sgpr0_sgpr1
+    %3:vreg_128 = FLAT_LOAD_DWORDX4 %2, 0, 0, implicit $exec, implicit $flat_scr
+    undef early-clobber %4.sub0_sub1:vreg_128, $sgpr_null = V_MAD_U64_U32_gfx11_e64 %3.sub0, 42, 0, 0, implicit $exec
+    undef %5.sub0:vreg_64 = COPY %4.sub1
+    early-clobber %4.sub1_sub2:vreg_128, $sgpr_null = V_MAD_U64_U32_gfx11_e64 %3.sub1, 42, %5, 0, implicit $exec
+    early-clobber %6:vreg_64, $sgpr_null = V_MAD_U64_U32_gfx11_e64 %3.sub2, 42, 0, 0, implicit $exec
+    undef %7.sub0:vreg_64 = COPY %6.sub1
+    early-clobber %8:vreg_64, $sgpr_null = V_MAD_U64_U32_gfx11_e64 %3.sub3, 42, %7, 0, implicit $exec
+    %4.sub2:vreg_128 = COPY %6.sub0
+    %4.sub3:vreg_128 = COPY %8.sub0
+    $vgpr31 = COPY %1
     INLINEASM &"; use v1", 1, 327690, $vgpr1
-    FLAT_STORE_DWORDX4 %12, %49, 0, 0, implicit $exec, implicit $flat_scr
+    FLAT_STORE_DWORDX4 %2, %4, 0, 0, implicit $exec, implicit $flat_scr
     SI_RETURN
 ...