[AMDGPU] Fix CHECK lines

FileCheck only recognizes a check prefix when it is immediately followed by a
colon, so directives written as "; CHECK ...", "# GCN ...", or
"; FUNC-LABEL ..." were treated as ordinary comments and never verified. Add
the missing colons, and rename [[TMP]] to [[RESULT]] in
sextload_i1_to_i32_trunc_cmp_ne_0 so the buffer_store_byte check refers to a
FileCheck variable that is actually defined.
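As a minimal illustration (lines taken from the first hunk below), the only
difference between an ignored comment and a real check is the colon after the
prefix:

    ; CHECK (<8 x s32>) = G_LOAD %0 :: (load 32, addrspace 4)       <- plain comment, silently ignored
    ; CHECK: (<8 x s32>) = G_LOAD %0(p4) :: (load 32, addrspace 4)  <- actual FileCheck directive
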
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
index b56a67d..f221250 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
@@ -455,7 +455,7 @@
   bb.0:
     liveins: $sgpr0_sgpr1
     ; CHECK-LABEL: name: load_constant_v8i32_uniform
-    ; CHECK (<8 x s32>) = G_LOAD %0 :: (load 32, addrspace 4)
+    ; CHECK: (<8 x s32>) = G_LOAD %0(p4) :: (load 32, addrspace 4)
     %0:_(p4) = COPY $sgpr0_sgpr1
     %1:_(<8 x s32>) = G_LOAD %0 :: (load 32, addrspace 4)
 ...
@@ -468,7 +468,7 @@
   bb.0:
     liveins: $sgpr0_sgpr1
     ; CHECK-LABEL: name: load_constant_v4i64_uniform
-    ; CHECK (<4 x s64>) = G_LOAD %0 :: (load 32, addrspace 4)
+    ; CHECK: (<4 x s64>) = G_LOAD %0(p4) :: (load 32, addrspace 4)
     %0:_(p4) = COPY $sgpr0_sgpr1
     %1:_(<4 x s64>) = G_LOAD %0 :: (load 32, addrspace 4)
 ...
@@ -481,7 +481,7 @@
   bb.0:
     liveins: $sgpr0_sgpr1
     ; CHECK-LABEL: name: load_constant_v16i32_uniform
-    ; CHECK (<16 x s32>) = G_LOAD %0 :: (load 64, addrspace 4)
+    ; CHECK: (<16 x s32>) = G_LOAD %0(p4) :: (load 64, addrspace 4)
     %0:_(p4) = COPY $sgpr0_sgpr1
     %1:_(<16 x s32>) = G_LOAD %0 :: (load 64, addrspace 4)
 ...
@@ -494,7 +494,7 @@
   bb.0:
     liveins: $sgpr0_sgpr1
     ; CHECK-LABEL: name: load_constant_v8i64_uniform
-    ; CHECK (<8 x s64>) = G_LOAD %0 :: (load 64, addrspace 4)
+    ; CHECK: (<8 x s64>) = G_LOAD %0(p4) :: (load 64, addrspace 4)
     %0:_(p4) = COPY $sgpr0_sgpr1
     %1:_(<8 x s64>) = G_LOAD %0 :: (load 64, addrspace 4)
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir b/llvm/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir
index 42c42a4..0eaa485 100644
--- a/llvm/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir
+++ b/llvm/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir
@@ -344,7 +344,7 @@
 ---
 # GCN-LABEL: name: shrink_addc_vop3{{$}}
 # GCN: %29:vgpr_32 = V_ADDC_U32_e32 %19, %17, implicit-def $vcc, implicit $vcc, implicit $exec
-# GCN %24 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed $vcc, implicit $exec
+# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed $vcc, implicit $exec
 
 name:            shrink_addc_vop3
 alignment:       1
diff --git a/llvm/test/CodeGen/AMDGPU/trunc-cmp-constant.ll b/llvm/test/CodeGen/AMDGPU/trunc-cmp-constant.ll
index e1644e4..9b9a807 100644
--- a/llvm/test/CodeGen/AMDGPU/trunc-cmp-constant.ll
+++ b/llvm/test/CodeGen/AMDGPU/trunc-cmp-constant.ll
@@ -3,7 +3,7 @@
 
 declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
 
-; FUNC-LABEL {{^}}sextload_i1_to_i32_trunc_cmp_eq_0:
+; FUNC-LABEL: {{^}}sextload_i1_to_i32_trunc_cmp_eq_0:
 ; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
 ; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]]
 ; SI: v_cmp_eq_u32_e32 vcc, 0, [[TMP]]{{$}}
@@ -80,9 +80,9 @@
 }
 
 
-; FUNC-LABEL {{^}}sextload_i1_to_i32_trunc_cmp_ne_0:
+; FUNC-LABEL: {{^}}sextload_i1_to_i32_trunc_cmp_ne_0:
 ; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
-; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]]
+; SI: v_and_b32_e32 [[RESULT:v[0-9]+]], 1, [[LOAD]]
 ; SI: buffer_store_byte [[RESULT]]
 define amdgpu_kernel void @sextload_i1_to_i32_trunc_cmp_ne_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
   %load = load i1, i1 addrspace(1)* %in