Follow-up on the proposal to move the MIR physical register namespace to the '$' sigil.
Discussed here:
http://lists.llvm.org/pipermail/llvm-dev/2018-January/120320.html
In preparation for adding support for named vregs, we are changing the sigil for
physical registers in MIR from '%' to '$'. This prevents name clashes between
named physical registers and named vregs.
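
For example (an illustrative MIR snippet, not taken from this patch; the
vreg name is hypothetical), a copy from a physical register that previously
printed as:

  %0:gr32 = COPY %eax

now prints as:

  %0:gr32 = COPY $eax

freeing the '%' namespace for virtual registers, including future named
vregs such as a hypothetical %count.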
llvm-svn: 323922
diff --git a/llvm/test/CodeGen/X86/2006-11-17-IllegalMove.ll b/llvm/test/CodeGen/X86/2006-11-17-IllegalMove.ll
index f6c17ac..268d5af 100644
--- a/llvm/test/CodeGen/X86/2006-11-17-IllegalMove.ll
+++ b/llvm/test/CodeGen/X86/2006-11-17-IllegalMove.ll
@@ -11,7 +11,7 @@
; CHECK-NEXT: # %bb.1: # %cond_next129
; CHECK-NEXT: movb 0, %al
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
; CHECK-NEXT: divb %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: cmpq %rax, %rax
diff --git a/llvm/test/CodeGen/X86/2010-05-28-Crash.ll b/llvm/test/CodeGen/X86/2010-05-28-Crash.ll
index fbb0b10..aef9d5b 100644
--- a/llvm/test/CodeGen/X86/2010-05-28-Crash.ll
+++ b/llvm/test/CodeGen/X86/2010-05-28-Crash.ll
@@ -45,7 +45,7 @@
!18 = !DIFile(filename: "f.c", directory: "/tmp")
!19 = !{}
-;CHECK: DEBUG_VALUE: bar:x <- %e
+;CHECK: DEBUG_VALUE: bar:x <- $e
;CHECK: Ltmp
;CHECK: DEBUG_VALUE: foo:y <- 1{{$}}
!20 = !{i32 1, !"Debug Info Version", i32 3}
diff --git a/llvm/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll b/llvm/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
index 435582e..68ae986 100644
--- a/llvm/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
+++ b/llvm/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
@@ -11,7 +11,7 @@
; Function Attrs: noinline nounwind optsize readnone ssp
define i32 @_ZN3foo3bazEi(%struct.foo* nocapture %this, i32 %x) #0 align 2 !dbg !4 {
entry:
- ; CHECK: DEBUG_VALUE: baz:this <- %rdi{{$}}
+ ; CHECK: DEBUG_VALUE: baz:this <- $rdi{{$}}
tail call void @llvm.dbg.value(metadata %struct.foo* %this, i64 0, metadata !13, metadata !16), !dbg !17
tail call void @llvm.dbg.value(metadata i32 %x, i64 0, metadata !18, metadata !16), !dbg !17
%0 = mul nsw i32 %x, 7, !dbg !19
diff --git a/llvm/test/CodeGen/X86/3addr-or.ll b/llvm/test/CodeGen/X86/3addr-or.ll
index acae1d1..1084270 100644
--- a/llvm/test/CodeGen/X86/3addr-or.ll
+++ b/llvm/test/CodeGen/X86/3addr-or.ll
@@ -5,7 +5,7 @@
define i32 @test1(i32 %x) nounwind ssp {
; CHECK-LABEL: test1:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: shll $5, %edi
; CHECK-NEXT: leal 3(%rdi), %eax
; CHECK-NEXT: retq
@@ -20,7 +20,7 @@
define i64 @test2(i8 %A, i8 %B) nounwind {
; CHECK-LABEL: test2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: shll $4, %edi
; CHECK-NEXT: andl $48, %edi
; CHECK-NEXT: movzbl %sil, %eax
@@ -55,8 +55,8 @@
define i32 @test4(i32 %a, i32 %b) nounwind readnone ssp {
; CHECK-LABEL: test4:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: andl $6, %edi
; CHECK-NEXT: andl $16, %esi
; CHECK-NEXT: leal (%rsi,%rdi), %eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
index 0ef7c95..606a2d7 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
@@ -28,8 +28,8 @@
define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
; X64-LABEL: test_add_i32:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
; X64-NEXT: leal (%rsi,%rdi), %eax
; X64-NEXT: retq
;
@@ -45,10 +45,10 @@
define i16 @test_add_i16(i16 %arg1, i16 %arg2) {
; X64-LABEL: test_add_i16:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
; X64-NEXT: leal (%rsi,%rdi), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
; X32-LABEL: test_add_i16:
diff --git a/llvm/test/CodeGen/X86/GlobalISel/ext-x86-64.ll b/llvm/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
index 6b93a2b..562a689 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
@@ -6,7 +6,7 @@
define i64 @test_zext_i1(i8 %a) {
; X64-LABEL: test_zext_i1:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: andq $1, %rdi
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/GlobalISel/ext.ll b/llvm/test/CodeGen/X86/GlobalISel/ext.ll
index 51cee2b..0960ae8 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/ext.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/ext.ll
@@ -13,7 +13,7 @@
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andb $1, %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
%val = trunc i32 %a to i1
%r = zext i1 %val to i8
@@ -31,7 +31,7 @@
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andw $1, %ax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
%val = trunc i32 %a to i1
%r = zext i1 %val to i16
diff --git a/llvm/test/CodeGen/X86/GlobalISel/gep.ll b/llvm/test/CodeGen/X86/GlobalISel/gep.ll
index 97a986e..f1c5c34 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/gep.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/gep.ll
@@ -13,7 +13,7 @@
;
; X64-LABEL: test_gep_i8:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
; X64-NEXT: movsbq %sil, %rax
; X64-NEXT: leaq (%rdi,%rax,4), %rax
; X64-NEXT: retq
@@ -47,7 +47,7 @@
;
; X64-LABEL: test_gep_i16:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
; X64-NEXT: movswq %si, %rax
; X64-NEXT: leaq (%rdi,%rax,4), %rax
; X64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll b/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
index 5e46126..6d13d6f 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
@@ -31,22 +31,22 @@
; X32: G_STORE [[LOAD]](s8), [[GV]](p0) :: (store 1 into @a1_8bit)
; X32: G_STORE [[LOAD6]](s8), [[GV1]](p0) :: (store 1 into @a7_8bit)
; X32: G_STORE [[LOAD7]](s8), [[GV2]](p0) :: (store 1 into @a8_8bit)
- ; X32: %al = COPY [[LOAD]](s8)
- ; X32: RET 0, implicit %al
+ ; X32: $al = COPY [[LOAD]](s8)
+ ; X32: RET 0, implicit $al
; X64-LABEL: name: test_i8_args_8
; X64: bb.1.entry:
- ; X64: liveins: %ecx, %edi, %edx, %esi, %r8d, %r9d
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X64: liveins: $ecx, $edi, $edx, $esi, $r8d, $r9d
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X64: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+ ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; X64: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; X64: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
+ ; X64: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
; X64: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
- ; X64: [[COPY3:%[0-9]+]]:_(s32) = COPY %ecx
+ ; X64: [[COPY3:%[0-9]+]]:_(s32) = COPY $ecx
; X64: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[COPY3]](s32)
- ; X64: [[COPY4:%[0-9]+]]:_(s32) = COPY %r8d
+ ; X64: [[COPY4:%[0-9]+]]:_(s32) = COPY $r8d
; X64: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[COPY4]](s32)
- ; X64: [[COPY5:%[0-9]+]]:_(s32) = COPY %r9d
+ ; X64: [[COPY5:%[0-9]+]]:_(s32) = COPY $r9d
; X64: [[TRUNC5:%[0-9]+]]:_(s8) = G_TRUNC [[COPY5]](s32)
; X64: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X64: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.1, align 0)
@@ -58,8 +58,8 @@
; X64: G_STORE [[TRUNC]](s8), [[GV]](p0) :: (store 1 into @a1_8bit)
; X64: G_STORE [[LOAD]](s8), [[GV1]](p0) :: (store 1 into @a7_8bit)
; X64: G_STORE [[LOAD1]](s8), [[GV2]](p0) :: (store 1 into @a8_8bit)
- ; X64: %al = COPY [[TRUNC]](s8)
- ; X64: RET 0, implicit %al
+ ; X64: $al = COPY [[TRUNC]](s8)
+ ; X64: RET 0, implicit $al
i8 %arg5, i8 %arg6, i8 %arg7, i8 %arg8) {
entry:
store i8 %arg1, i8* @a1_8bit
@@ -97,17 +97,17 @@
; X32: G_STORE [[LOAD]](s32), [[GV]](p0) :: (store 4 into @a1_32bit)
; X32: G_STORE [[LOAD6]](s32), [[GV1]](p0) :: (store 4 into @a7_32bit)
; X32: G_STORE [[LOAD7]](s32), [[GV2]](p0) :: (store 4 into @a8_32bit)
- ; X32: %eax = COPY [[LOAD]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[LOAD]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_i32_args_8
; X64: bb.1.entry:
- ; X64: liveins: %ecx, %edi, %edx, %esi, %r8d, %r9d
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
- ; X64: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
- ; X64: [[COPY3:%[0-9]+]]:_(s32) = COPY %ecx
- ; X64: [[COPY4:%[0-9]+]]:_(s32) = COPY %r8d
- ; X64: [[COPY5:%[0-9]+]]:_(s32) = COPY %r9d
+ ; X64: liveins: $ecx, $edi, $edx, $esi, $r8d, $r9d
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
+ ; X64: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
+ ; X64: [[COPY3:%[0-9]+]]:_(s32) = COPY $ecx
+ ; X64: [[COPY4:%[0-9]+]]:_(s32) = COPY $r8d
+ ; X64: [[COPY5:%[0-9]+]]:_(s32) = COPY $r9d
; X64: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X64: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 0)
; X64: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
@@ -118,8 +118,8 @@
; X64: G_STORE [[COPY]](s32), [[GV]](p0) :: (store 4 into @a1_32bit)
; X64: G_STORE [[LOAD]](s32), [[GV1]](p0) :: (store 4 into @a7_32bit)
; X64: G_STORE [[LOAD1]](s32), [[GV2]](p0) :: (store 4 into @a8_32bit)
- ; X64: %eax = COPY [[COPY]](s32)
- ; X64: RET 0, implicit %eax
+ ; X64: $eax = COPY [[COPY]](s32)
+ ; X64: RET 0, implicit $eax
i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8) {
entry:
store i32 %arg1, i32* @a1_32bit
@@ -182,18 +182,18 @@
; X32: G_STORE [[MV6]](s64), [[GV1]](p0) :: (store 8 into @a7_64bit, align 4)
; X32: G_STORE [[MV7]](s64), [[GV2]](p0) :: (store 8 into @a8_64bit, align 4)
; X32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
- ; X32: %eax = COPY [[UV]](s32)
- ; X32: %edx = COPY [[UV1]](s32)
- ; X32: RET 0, implicit %eax, implicit %edx
+ ; X32: $eax = COPY [[UV]](s32)
+ ; X32: $edx = COPY [[UV1]](s32)
+ ; X32: RET 0, implicit $eax, implicit $edx
; X64-LABEL: name: test_i64_args_8
; X64: bb.1.entry:
- ; X64: liveins: %rcx, %rdi, %rdx, %rsi, %r8, %r9
- ; X64: [[COPY:%[0-9]+]]:_(s64) = COPY %rdi
- ; X64: [[COPY1:%[0-9]+]]:_(s64) = COPY %rsi
- ; X64: [[COPY2:%[0-9]+]]:_(s64) = COPY %rdx
- ; X64: [[COPY3:%[0-9]+]]:_(s64) = COPY %rcx
- ; X64: [[COPY4:%[0-9]+]]:_(s64) = COPY %r8
- ; X64: [[COPY5:%[0-9]+]]:_(s64) = COPY %r9
+ ; X64: liveins: $rcx, $rdi, $rdx, $rsi, $r8, $r9
+ ; X64: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi
+ ; X64: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi
+ ; X64: [[COPY2:%[0-9]+]]:_(s64) = COPY $rdx
+ ; X64: [[COPY3:%[0-9]+]]:_(s64) = COPY $rcx
+ ; X64: [[COPY4:%[0-9]+]]:_(s64) = COPY $r8
+ ; X64: [[COPY5:%[0-9]+]]:_(s64) = COPY $r9
; X64: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X64: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 0)
; X64: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
@@ -204,8 +204,8 @@
; X64: G_STORE [[COPY]](s64), [[GV]](p0) :: (store 8 into @a1_64bit)
; X64: G_STORE [[LOAD]](s64), [[GV1]](p0) :: (store 8 into @a7_64bit)
; X64: G_STORE [[LOAD1]](s64), [[GV2]](p0) :: (store 8 into @a8_64bit)
- ; X64: %rax = COPY [[COPY]](s64)
- ; X64: RET 0, implicit %rax
+ ; X64: $rax = COPY [[COPY]](s64)
+ ; X64: RET 0, implicit $rax
i64 %arg5, i64 %arg6, i64 %arg7, i64 %arg8) {
; ... a bunch more that we don't track ...
entry:
@@ -222,15 +222,15 @@
; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 0)
; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
- ; X32: %fp0 = COPY [[LOAD1]](s32)
- ; X32: RET 0, implicit %fp0
+ ; X32: $fp0 = COPY [[LOAD1]](s32)
+ ; X32: RET 0, implicit $fp0
; X64-LABEL: name: test_float_args
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %xmm0, %xmm1
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm1
- ; X64: %xmm0 = COPY [[COPY1]](s32)
- ; X64: RET 0, implicit %xmm0
+ ; X64: liveins: $xmm0, $xmm1
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
+ ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
+ ; X64: $xmm0 = COPY [[COPY1]](s32)
+ ; X64: RET 0, implicit $xmm0
ret float %arg2
}
@@ -241,57 +241,57 @@
; X32: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 0)
; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 8 from %fixed-stack.0, align 0)
- ; X32: %fp0 = COPY [[LOAD1]](s64)
- ; X32: RET 0, implicit %fp0
+ ; X32: $fp0 = COPY [[LOAD1]](s64)
+ ; X32: RET 0, implicit $fp0
; X64-LABEL: name: test_double_args
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %xmm0, %xmm1
- ; X64: [[COPY:%[0-9]+]]:_(s64) = COPY %xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm1
- ; X64: %xmm0 = COPY [[COPY1]](s64)
- ; X64: RET 0, implicit %xmm0
+ ; X64: liveins: $xmm0, $xmm1
+ ; X64: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
+ ; X64: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
+ ; X64: $xmm0 = COPY [[COPY1]](s64)
+ ; X64: RET 0, implicit $xmm0
ret double %arg2
}
define <4 x i32> @test_v4i32_args(<4 x i32> %arg1, <4 x i32> %arg2) {
; X32-LABEL: name: test_v4i32_args
; X32: bb.1 (%ir-block.0):
- ; X32: liveins: %xmm0, %xmm1
- ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
- ; X32: %xmm0 = COPY [[COPY1]](<4 x s32>)
- ; X32: RET 0, implicit %xmm0
+ ; X32: liveins: $xmm0, $xmm1
+ ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
+ ; X32: $xmm0 = COPY [[COPY1]](<4 x s32>)
+ ; X32: RET 0, implicit $xmm0
; X64-LABEL: name: test_v4i32_args
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %xmm0, %xmm1
- ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
- ; X64: %xmm0 = COPY [[COPY1]](<4 x s32>)
- ; X64: RET 0, implicit %xmm0
+ ; X64: liveins: $xmm0, $xmm1
+ ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
+ ; X64: $xmm0 = COPY [[COPY1]](<4 x s32>)
+ ; X64: RET 0, implicit $xmm0
ret <4 x i32> %arg2
}
define <8 x i32> @test_v8i32_args(<8 x i32> %arg1) {
; X32-LABEL: name: test_v8i32_args
; X32: bb.1 (%ir-block.0):
- ; X32: liveins: %xmm0, %xmm1
- ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
+ ; X32: liveins: $xmm0, $xmm1
+ ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X32: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
; X32: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[MV]](<8 x s32>)
- ; X32: %xmm0 = COPY [[UV]](<4 x s32>)
- ; X32: %xmm1 = COPY [[UV1]](<4 x s32>)
- ; X32: RET 0, implicit %xmm0, implicit %xmm1
+ ; X32: $xmm0 = COPY [[UV]](<4 x s32>)
+ ; X32: $xmm1 = COPY [[UV1]](<4 x s32>)
+ ; X32: RET 0, implicit $xmm0, implicit $xmm1
; X64-LABEL: name: test_v8i32_args
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %xmm0, %xmm1
- ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
+ ; X64: liveins: $xmm0, $xmm1
+ ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X64: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
; X64: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[MV]](<8 x s32>)
- ; X64: %xmm0 = COPY [[UV]](<4 x s32>)
- ; X64: %xmm1 = COPY [[UV1]](<4 x s32>)
- ; X64: RET 0, implicit %xmm0, implicit %xmm1
+ ; X64: $xmm0 = COPY [[UV]](<4 x s32>)
+ ; X64: $xmm1 = COPY [[UV1]](<4 x s32>)
+ ; X64: RET 0, implicit $xmm0, implicit $xmm1
ret <8 x i32> %arg1
}
@@ -307,19 +307,19 @@
}
define i32 * @test_memop_i32(i32 * %p1) {
-;X64 liveins: %rdi
+;X64 liveins: $rdi
; X32-LABEL: name: test_memop_i32
; X32: bb.1 (%ir-block.0):
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
- ; X32: %eax = COPY [[LOAD]](p0)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[LOAD]](p0)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_memop_i32
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %rdi
- ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY %rdi
- ; X64: %rax = COPY [[COPY]](p0)
- ; X64: RET 0, implicit %rax
+ ; X64: liveins: $rdi
+ ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
+ ; X64: $rax = COPY [[COPY]](p0)
+ ; X64: RET 0, implicit $rax
ret i32 * %p1;
}
@@ -327,15 +327,15 @@
define void @test_trivial_call() {
; X32-LABEL: name: test_trivial_call
; X32: bb.1 (%ir-block.0):
- ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: CALLpcrel32 @trivial_callee, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: CALLpcrel32 @trivial_callee, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: RET 0
; X64-LABEL: name: test_trivial_call
; X64: bb.1 (%ir-block.0):
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: CALL64pcrel32 @trivial_callee, csr_64, implicit %rsp, implicit %ssp
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: CALL64pcrel32 @trivial_callee, csr_64, implicit $rsp, implicit $ssp
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: RET 0
call void @trivial_callee()
ret void
@@ -349,28 +349,28 @@
; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 0)
; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
- ; X32: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
; X32: G_STORE [[LOAD1]](s32), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP1]](p0) :: (store 4 into stack + 4, align 0)
- ; X32: CALLpcrel32 @simple_arg_callee, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 8, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @simple_arg_callee, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 8, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: RET 0
; X64-LABEL: name: test_simple_arg
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %edi, %esi
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: %edi = COPY [[COPY1]](s32)
- ; X64: %esi = COPY [[COPY]](s32)
- ; X64: CALL64pcrel32 @simple_arg_callee, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit %esi
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: liveins: $edi, $esi
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: $edi = COPY [[COPY1]](s32)
+ ; X64: $esi = COPY [[COPY]](s32)
+ ; X64: CALL64pcrel32 @simple_arg_callee, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit $esi
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: RET 0
call void @simple_arg_callee(i32 %in1, i32 %in0)
ret void
@@ -382,63 +382,63 @@
; X32: bb.1 (%ir-block.0):
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
- ; X32: ADJCALLSTACKDOWN32 32, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 32, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP1]](p0) :: (store 4 into stack + 4, align 0)
- ; X32: [[COPY2:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY2:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; X32: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[COPY2]], [[C2]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP2]](p0) :: (store 4 into stack + 8, align 0)
- ; X32: [[COPY3:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY3:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; X32: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[COPY3]], [[C3]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP3]](p0) :: (store 4 into stack + 12, align 0)
- ; X32: [[COPY4:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY4:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; X32: [[GEP4:%[0-9]+]]:_(p0) = G_GEP [[COPY4]], [[C4]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP4]](p0) :: (store 4 into stack + 16, align 0)
- ; X32: [[COPY5:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY5:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
; X32: [[GEP5:%[0-9]+]]:_(p0) = G_GEP [[COPY5]], [[C5]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP5]](p0) :: (store 4 into stack + 20, align 0)
- ; X32: [[COPY6:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY6:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; X32: [[GEP6:%[0-9]+]]:_(p0) = G_GEP [[COPY6]], [[C6]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP6]](p0) :: (store 4 into stack + 24, align 0)
- ; X32: [[COPY7:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY7:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
; X32: [[GEP7:%[0-9]+]]:_(p0) = G_GEP [[COPY7]], [[C7]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP7]](p0) :: (store 4 into stack + 28, align 0)
- ; X32: CALLpcrel32 @simple_arg8_callee, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 32, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @simple_arg8_callee, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 32, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: RET 0
; X64-LABEL: name: test_simple_arg8_call
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %edi
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; X64: ADJCALLSTACKDOWN64 16, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: %edi = COPY [[COPY]](s32)
- ; X64: %esi = COPY [[COPY]](s32)
- ; X64: %edx = COPY [[COPY]](s32)
- ; X64: %ecx = COPY [[COPY]](s32)
- ; X64: %r8d = COPY [[COPY]](s32)
- ; X64: %r9d = COPY [[COPY]](s32)
- ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY %rsp
+ ; X64: liveins: $edi
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; X64: ADJCALLSTACKDOWN64 16, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: $edi = COPY [[COPY]](s32)
+ ; X64: $esi = COPY [[COPY]](s32)
+ ; X64: $edx = COPY [[COPY]](s32)
+ ; X64: $ecx = COPY [[COPY]](s32)
+ ; X64: $r8d = COPY [[COPY]](s32)
+ ; X64: $r9d = COPY [[COPY]](s32)
+ ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY $rsp
; X64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; X64: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C]](s64)
; X64: G_STORE [[COPY]](s32), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X64: [[COPY2:%[0-9]+]]:_(p0) = COPY %rsp
+ ; X64: [[COPY2:%[0-9]+]]:_(p0) = COPY $rsp
; X64: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; X64: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY2]], [[C1]](s64)
; X64: G_STORE [[COPY]](s32), [[GEP1]](p0) :: (store 4 into stack + 8, align 0)
- ; X64: CALL64pcrel32 @simple_arg8_callee, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit %esi, implicit %edx, implicit %ecx, implicit %r8d, implicit %r9d
- ; X64: ADJCALLSTACKUP64 16, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: CALL64pcrel32 @simple_arg8_callee, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit $esi, implicit $edx, implicit $ecx, implicit $r8d, implicit $r9d
+ ; X64: ADJCALLSTACKUP64 16, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: RET 0
call void @simple_arg8_callee(i32 %in0, i32 %in0, i32 %in0, i32 %in0,i32 %in0, i32 %in0, i32 %in0, i32 %in0)
ret void
@@ -449,28 +449,28 @@
; X32-LABEL: name: test_simple_return_callee
; X32: bb.1 (%ir-block.0):
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
- ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C1]](s32)
; X32: G_STORE [[C]](s32), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: CALLpcrel32 @simple_return_callee, csr_32, implicit %esp, implicit %ssp, implicit-def %eax
- ; X32: [[COPY1:%[0-9]+]]:_(s32) = COPY %eax
- ; X32: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @simple_return_callee, csr_32, implicit $esp, implicit $ssp, implicit-def $eax
+ ; X32: [[COPY1:%[0-9]+]]:_(s32) = COPY $eax
+ ; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY1]]
- ; X32: %eax = COPY [[ADD]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[ADD]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_simple_return_callee
; X64: bb.1 (%ir-block.0):
; X64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: %edi = COPY [[C]](s32)
- ; X64: CALL64pcrel32 @simple_return_callee, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit-def %eax
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %eax
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: $edi = COPY [[C]](s32)
+ ; X64: CALL64pcrel32 @simple_return_callee, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit-def $eax
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $eax
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
- ; X64: %eax = COPY [[ADD]](s32)
- ; X64: RET 0, implicit %eax
+ ; X64: $eax = COPY [[ADD]](s32)
+ ; X64: RET 0, implicit $eax
%call = call i32 @simple_return_callee(i32 5)
%r = add i32 %call, %call
ret i32 %r
@@ -480,51 +480,51 @@
define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) {
; X32-LABEL: name: test_split_return_callee
; X32: bb.1 (%ir-block.0):
- ; X32: liveins: %xmm0, %xmm1, %xmm2
- ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
- ; X32: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY %xmm2
+ ; X32: liveins: $xmm0, $xmm1, $xmm2
+ ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
+ ; X32: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $xmm2
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 16 from %fixed-stack.0, align 0)
; X32: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
; X32: [[MV1:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY2]](<4 x s32>), [[LOAD]](<4 x s32>)
- ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[MV1]](<8 x s32>)
- ; X32: %xmm0 = COPY [[UV]](<4 x s32>)
- ; X32: %xmm1 = COPY [[UV1]](<4 x s32>)
- ; X32: CALLpcrel32 @split_return_callee, csr_32, implicit %esp, implicit %ssp, implicit %xmm0, implicit %xmm1, implicit-def %xmm0, implicit-def %xmm1
- ; X32: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X32: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
+ ; X32: $xmm0 = COPY [[UV]](<4 x s32>)
+ ; X32: $xmm1 = COPY [[UV1]](<4 x s32>)
+ ; X32: CALLpcrel32 @split_return_callee, csr_32, implicit $esp, implicit $ssp, implicit $xmm0, implicit $xmm1, implicit-def $xmm0, implicit-def $xmm1
+ ; X32: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X32: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X32: [[MV2:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY3]](<4 x s32>), [[COPY4]](<4 x s32>)
- ; X32: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[MV]], [[MV2]]
; X32: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[ADD]](<8 x s32>)
- ; X32: %xmm0 = COPY [[UV2]](<4 x s32>)
- ; X32: %xmm1 = COPY [[UV3]](<4 x s32>)
- ; X32: RET 0, implicit %xmm0, implicit %xmm1
+ ; X32: $xmm0 = COPY [[UV2]](<4 x s32>)
+ ; X32: $xmm1 = COPY [[UV3]](<4 x s32>)
+ ; X32: RET 0, implicit $xmm0, implicit $xmm1
; X64-LABEL: name: test_split_return_callee
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %xmm0, %xmm1, %xmm2, %xmm3
- ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
- ; X64: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY %xmm2
- ; X64: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY %xmm3
+ ; X64: liveins: $xmm0, $xmm1, $xmm2, $xmm3
+ ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
+ ; X64: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $xmm2
+ ; X64: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $xmm3
; X64: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
; X64: [[MV1:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY2]](<4 x s32>), [[COPY3]](<4 x s32>)
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[MV1]](<8 x s32>)
- ; X64: %xmm0 = COPY [[UV]](<4 x s32>)
- ; X64: %xmm1 = COPY [[UV1]](<4 x s32>)
- ; X64: CALL64pcrel32 @split_return_callee, csr_64, implicit %rsp, implicit %ssp, implicit %xmm0, implicit %xmm1, implicit-def %xmm0, implicit-def %xmm1
- ; X64: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X64: [[COPY5:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
+ ; X64: $xmm0 = COPY [[UV]](<4 x s32>)
+ ; X64: $xmm1 = COPY [[UV1]](<4 x s32>)
+ ; X64: CALL64pcrel32 @split_return_callee, csr_64, implicit $rsp, implicit $ssp, implicit $xmm0, implicit $xmm1, implicit-def $xmm0, implicit-def $xmm1
+ ; X64: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X64: [[COPY5:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X64: [[MV2:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY4]](<4 x s32>), [[COPY5]](<4 x s32>)
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[MV]], [[MV2]]
; X64: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[ADD]](<8 x s32>)
- ; X64: %xmm0 = COPY [[UV2]](<4 x s32>)
- ; X64: %xmm1 = COPY [[UV3]](<4 x s32>)
- ; X64: RET 0, implicit %xmm0, implicit %xmm1
+ ; X64: $xmm0 = COPY [[UV2]](<4 x s32>)
+ ; X64: $xmm1 = COPY [[UV3]](<4 x s32>)
+ ; X64: RET 0, implicit $xmm0, implicit $xmm1
%call = call <8 x i32> @split_return_callee(<8 x i32> %arg2)
%r = add <8 x i32> %arg1, %call
ret <8 x i32> %r
@@ -535,17 +535,17 @@
; X32: bb.1 (%ir-block.0):
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD:%[0-9]+]]:gr32(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
- ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: CALL32r [[LOAD]](p0), csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: CALL32r [[LOAD]](p0), csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: RET 0
; X64-LABEL: name: test_indirect_call
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %rdi
- ; X64: [[COPY:%[0-9]+]]:gr64(p0) = COPY %rdi
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: CALL64r [[COPY]](p0), csr_64, implicit %rsp, implicit %ssp
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: liveins: $rdi
+ ; X64: [[COPY:%[0-9]+]]:gr64(p0) = COPY $rdi
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: CALL64r [[COPY]](p0), csr_64, implicit $rsp, implicit $ssp
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: RET 0
call void %func()
ret void
@@ -559,51 +559,51 @@
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
; X32: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p0) :: (load 1 from %ir.addr)
- ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD1]](s8)
; X32: G_STORE [[ANYEXT]](s32), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: CALLpcrel32 @take_char, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
; X32: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s8)
; X32: G_STORE [[SEXT]](s32), [[GEP1]](p0) :: (store 4 into stack, align 0)
- ; X32: CALLpcrel32 @take_char, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY2:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY2:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[COPY2]], [[C2]](s32)
; X32: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s8)
; X32: G_STORE [[ZEXT]](s32), [[GEP2]](p0) :: (store 4 into stack, align 0)
- ; X32: CALLpcrel32 @take_char, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: RET 0
; X64-LABEL: name: test_abi_exts_call
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %rdi
- ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY %rdi
+ ; X64: liveins: $rdi
+ ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
; X64: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load 1 from %ir.addr)
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s8)
- ; X64: %edi = COPY [[ANYEXT]](s32)
- ; X64: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %ssp, implicit %edi
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: $edi = COPY [[ANYEXT]](s32)
+ ; X64: CALL64pcrel32 @take_char, csr_64, implicit $rsp, implicit $ssp, implicit $edi
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s8)
- ; X64: %edi = COPY [[SEXT]](s32)
- ; X64: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %ssp, implicit %edi
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: $edi = COPY [[SEXT]](s32)
+ ; X64: CALL64pcrel32 @take_char, csr_64, implicit $rsp, implicit $ssp, implicit $edi
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s8)
- ; X64: %edi = COPY [[ZEXT]](s32)
- ; X64: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %ssp, implicit %edi
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: $edi = COPY [[ZEXT]](s32)
+ ; X64: CALL64pcrel32 @take_char, csr_64, implicit $rsp, implicit $ssp, implicit $edi
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: RET 0
%val = load i8, i8* %addr
call void @take_char(i8 %val)
@@ -622,31 +622,31 @@
; X32: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
; X32: [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.addr_ptr)
; X32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[LOAD1]](p0) :: (load 4 from %ir.val_ptr)
- ; X32: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
; X32: G_STORE [[LOAD2]](p0), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
; X32: G_STORE [[LOAD3]](s32), [[GEP1]](p0) :: (store 4 into stack + 4, align 0)
- ; X32: CALLpcrel32 @variadic_callee, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 8, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @variadic_callee, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 8, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: RET 0
; X64-LABEL: name: test_variadic_call_1
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %rdi, %rsi
- ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY %rdi
- ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY %rsi
+ ; X64: liveins: $rdi, $rsi
+ ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
+ ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY $rsi
; X64: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.addr_ptr)
; X64: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[COPY1]](p0) :: (load 4 from %ir.val_ptr)
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: %rdi = COPY [[LOAD]](p0)
- ; X64: %esi = COPY [[LOAD1]](s32)
- ; X64: %al = MOV8ri 0
- ; X64: CALL64pcrel32 @variadic_callee, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit %esi, implicit %al
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: $rdi = COPY [[LOAD]](p0)
+ ; X64: $esi = COPY [[LOAD1]](s32)
+ ; X64: $al = MOV8ri 0
+ ; X64: CALL64pcrel32 @variadic_callee, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $esi, implicit $al
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: RET 0
%addr = load i8*, i8** %addr_ptr
%val = load i32, i32* %val_ptr
@@ -663,31 +663,31 @@
; X32: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
; X32: [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.addr_ptr)
; X32: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[LOAD1]](p0) :: (load 8 from %ir.val_ptr, align 4)
- ; X32: ADJCALLSTACKDOWN32 12, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 12, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
; X32: G_STORE [[LOAD2]](p0), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
; X32: G_STORE [[LOAD3]](s64), [[GEP1]](p0) :: (store 8 into stack + 4, align 0)
- ; X32: CALLpcrel32 @variadic_callee, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 12, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @variadic_callee, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 12, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: RET 0
; X64-LABEL: name: test_variadic_call_2
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %rdi, %rsi
- ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY %rdi
- ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY %rsi
+ ; X64: liveins: $rdi, $rsi
+ ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
+ ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY $rsi
; X64: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.addr_ptr)
; X64: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[COPY1]](p0) :: (load 8 from %ir.val_ptr)
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: %rdi = COPY [[LOAD]](p0)
- ; X64: %xmm0 = COPY [[LOAD1]](s64)
- ; X64: %al = MOV8ri 1
- ; X64: CALL64pcrel32 @variadic_callee, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit %xmm0, implicit %al
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: $rdi = COPY [[LOAD]](p0)
+ ; X64: $xmm0 = COPY [[LOAD1]](s64)
+ ; X64: $al = MOV8ri 1
+ ; X64: CALL64pcrel32 @variadic_callee, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $xmm0, implicit $al
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: RET 0
%addr = load i8*, i8** %addr_ptr
%val = load double, double* %val_ptr
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-add-v128.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-add-v128.mir
index abd88f4..99ae63d 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-add-v128.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-add-v128.mir
@@ -33,18 +33,18 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_add_v16i8
; ALL: [[DEF:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
; ALL: [[DEF1:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
; ALL: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[DEF]], [[DEF1]]
- ; ALL: %xmm0 = COPY [[ADD]](<16 x s8>)
+ ; ALL: $xmm0 = COPY [[ADD]](<16 x s8>)
; ALL: RET 0
%0(<16 x s8>) = IMPLICIT_DEF
%1(<16 x s8>) = IMPLICIT_DEF
%2(<16 x s8>) = G_ADD %0, %1
- %xmm0 = COPY %2
+ $xmm0 = COPY %2
RET 0
...
@@ -59,18 +59,18 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_add_v8i16
; ALL: [[DEF:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
; ALL: [[DEF1:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
; ALL: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[DEF]], [[DEF1]]
- ; ALL: %xmm0 = COPY [[ADD]](<8 x s16>)
+ ; ALL: $xmm0 = COPY [[ADD]](<8 x s16>)
; ALL: RET 0
%0(<8 x s16>) = IMPLICIT_DEF
%1(<8 x s16>) = IMPLICIT_DEF
%2(<8 x s16>) = G_ADD %0, %1
- %xmm0 = COPY %2
+ $xmm0 = COPY %2
RET 0
...
@@ -85,18 +85,18 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_add_v4i32
; ALL: [[DEF:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
; ALL: [[DEF1:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
; ALL: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[DEF]], [[DEF1]]
- ; ALL: %xmm0 = COPY [[ADD]](<4 x s32>)
+ ; ALL: $xmm0 = COPY [[ADD]](<4 x s32>)
; ALL: RET 0
%0(<4 x s32>) = IMPLICIT_DEF
%1(<4 x s32>) = IMPLICIT_DEF
%2(<4 x s32>) = G_ADD %0, %1
- %xmm0 = COPY %2
+ $xmm0 = COPY %2
RET 0
...
@@ -111,18 +111,18 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_add_v2i64
; ALL: [[DEF:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
; ALL: [[DEF1:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
; ALL: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[DEF]], [[DEF1]]
- ; ALL: %xmm0 = COPY [[ADD]](<2 x s64>)
+ ; ALL: $xmm0 = COPY [[ADD]](<2 x s64>)
; ALL: RET 0
%0(<2 x s64>) = IMPLICIT_DEF
%1(<2 x s64>) = IMPLICIT_DEF
%2(<2 x s64>) = G_ADD %0, %1
- %xmm0 = COPY %2
+ $xmm0 = COPY %2
RET 0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-add-v256.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-add-v256.mir
index bea9161..6924052 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-add-v256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-add-v256.mir
@@ -36,7 +36,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; CHECK-LABEL: name: test_add_v32i8
; ALL: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
@@ -51,15 +51,15 @@
; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
; SSE2: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
; AVX1: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
- ; SSE2: %ymm0 = COPY [[MV]](<32 x s8>)
- ; AVX1: %ymm0 = COPY [[MV]](<32 x s8>)
+ ; SSE2: $ymm0 = COPY [[MV]](<32 x s8>)
+ ; AVX1: $ymm0 = COPY [[MV]](<32 x s8>)
; AVX2: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX2: %ymm0 = COPY [[ADD]](<32 x s8>)
+ ; AVX2: $ymm0 = COPY [[ADD]](<32 x s8>)
; ALL: RET 0
%0(<32 x s8>) = IMPLICIT_DEF
%1(<32 x s8>) = IMPLICIT_DEF
%2(<32 x s8>) = G_ADD %0, %1
- %ymm0 = COPY %2
+ $ymm0 = COPY %2
RET 0
...
@@ -74,7 +74,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_add_v16i16
; ALL: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
@@ -89,15 +89,15 @@
; AVX1: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
; AVX1: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
; AVX1: [[MV:%[0-9]+]]:_(<16 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
- ; SSE2: %ymm0 = COPY [[MV]](<16 x s16>)
- ; AVX1: %ymm0 = COPY [[MV]](<16 x s16>)
+ ; SSE2: $ymm0 = COPY [[MV]](<16 x s16>)
+ ; AVX1: $ymm0 = COPY [[MV]](<16 x s16>)
; AVX2: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX2: %ymm0 = COPY [[ADD]](<16 x s16>)
+ ; AVX2: $ymm0 = COPY [[ADD]](<16 x s16>)
; ALL: RET 0
%0(<16 x s16>) = IMPLICIT_DEF
%1(<16 x s16>) = IMPLICIT_DEF
%2(<16 x s16>) = G_ADD %0, %1
- %ymm0 = COPY %2
+ $ymm0 = COPY %2
RET 0
...
@@ -112,7 +112,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_add_v8i32
; ALL: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
@@ -122,20 +122,20 @@
; SSE2: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
; SSE2: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
; SSE2: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
- ; SSE2: %ymm0 = COPY [[MV]](<8 x s32>)
+ ; SSE2: $ymm0 = COPY [[MV]](<8 x s32>)
; AVX1: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
; AVX1: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
; AVX1: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
; AVX1: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
; AVX1: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
- ; AVX1: %ymm0 = COPY [[MV]](<8 x s32>)
+ ; AVX1: $ymm0 = COPY [[MV]](<8 x s32>)
; AVX2: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX2: %ymm0 = COPY [[ADD]](<8 x s32>)
+ ; AVX2: $ymm0 = COPY [[ADD]](<8 x s32>)
; ALL: RET 0
%0(<8 x s32>) = IMPLICIT_DEF
%1(<8 x s32>) = IMPLICIT_DEF
%2(<8 x s32>) = G_ADD %0, %1
- %ymm0 = COPY %2
+ $ymm0 = COPY %2
RET 0
...
@@ -150,7 +150,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_add_v4i64
; ALL: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
@@ -165,15 +165,15 @@
; AVX1: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]]
; AVX1: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]]
; AVX1: [[MV:%[0-9]+]]:_(<4 x s64>) = G_MERGE_VALUES [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>)
- ; SSE2: %ymm0 = COPY [[MV]](<4 x s64>)
- ; AVX1: %ymm0 = COPY [[MV]](<4 x s64>)
+ ; SSE2: $ymm0 = COPY [[MV]](<4 x s64>)
+ ; AVX1: $ymm0 = COPY [[MV]](<4 x s64>)
; AVX2: [[ADD:%[0-9]+]]:_(<4 x s64>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX2: %ymm0 = COPY [[ADD]](<4 x s64>)
+ ; AVX2: $ymm0 = COPY [[ADD]](<4 x s64>)
; ALL: RET 0
%0(<4 x s64>) = IMPLICIT_DEF
%1(<4 x s64>) = IMPLICIT_DEF
%2(<4 x s64>) = G_ADD %0, %1
- %ymm0 = COPY %2
+ $ymm0 = COPY %2
RET 0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir
index 81b66d1..01183ae 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir
@@ -40,7 +40,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_add_v64i8
; ALL: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
@@ -52,20 +52,20 @@
; AVX1: [[ADD2:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV2]], [[UV6]]
; AVX1: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]]
; AVX1: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>), [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>)
- ; AVX1: %zmm0 = COPY [[MV]](<64 x s8>)
+ ; AVX1: $zmm0 = COPY [[MV]](<64 x s8>)
; AVX512F: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF]](<64 x s8>)
; AVX512F: [[UV2:%[0-9]+]]:_(<32 x s8>), [[UV3:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF1]](<64 x s8>)
; AVX512F: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[UV]], [[UV2]]
; AVX512F: [[ADD1:%[0-9]+]]:_(<32 x s8>) = G_ADD [[UV1]], [[UV3]]
; AVX512F: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[ADD]](<32 x s8>), [[ADD1]](<32 x s8>)
- ; AVX512F: %zmm0 = COPY [[MV]](<64 x s8>)
+ ; AVX512F: $zmm0 = COPY [[MV]](<64 x s8>)
; AVX512BW: [[ADD:%[0-9]+]]:_(<64 x s8>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512BW: %zmm0 = COPY [[ADD]](<64 x s8>)
+ ; AVX512BW: $zmm0 = COPY [[ADD]](<64 x s8>)
; ALL: RET 0
%0(<64 x s8>) = IMPLICIT_DEF
%1(<64 x s8>) = IMPLICIT_DEF
%2(<64 x s8>) = G_ADD %0, %1
- %zmm0 = COPY %2
+ $zmm0 = COPY %2
RET 0
...
@@ -80,7 +80,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_add_v32i16
; ALL: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
@@ -92,20 +92,20 @@
; AVX1: [[ADD2:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV2]], [[UV6]]
; AVX1: [[ADD3:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV3]], [[UV7]]
; AVX1: [[MV:%[0-9]+]]:_(<32 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>), [[ADD2]](<8 x s16>), [[ADD3]](<8 x s16>)
- ; AVX1: %zmm0 = COPY [[MV]](<32 x s16>)
+ ; AVX1: $zmm0 = COPY [[MV]](<32 x s16>)
; AVX512F: [[UV:%[0-9]+]]:_(<16 x s16>), [[UV1:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF]](<32 x s16>)
; AVX512F: [[UV2:%[0-9]+]]:_(<16 x s16>), [[UV3:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF1]](<32 x s16>)
; AVX512F: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[UV]], [[UV2]]
; AVX512F: [[ADD1:%[0-9]+]]:_(<16 x s16>) = G_ADD [[UV1]], [[UV3]]
; AVX512F: [[MV:%[0-9]+]]:_(<32 x s16>) = G_MERGE_VALUES [[ADD]](<16 x s16>), [[ADD1]](<16 x s16>)
- ; AVX512F: %zmm0 = COPY [[MV]](<32 x s16>)
+ ; AVX512F: $zmm0 = COPY [[MV]](<32 x s16>)
; AVX512BW: [[ADD:%[0-9]+]]:_(<32 x s16>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512BW: %zmm0 = COPY [[ADD]](<32 x s16>)
+ ; AVX512BW: $zmm0 = COPY [[ADD]](<32 x s16>)
; ALL: RET 0
%0(<32 x s16>) = IMPLICIT_DEF
%1(<32 x s16>) = IMPLICIT_DEF
%2(<32 x s16>) = G_ADD %0, %1
- %zmm0 = COPY %2
+ $zmm0 = COPY %2
RET 0
...
@@ -120,7 +120,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_add_v16i32
; ALL: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
@@ -132,16 +132,16 @@
; AVX1: [[ADD2:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV2]], [[UV6]]
; AVX1: [[ADD3:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV3]], [[UV7]]
; AVX1: [[MV:%[0-9]+]]:_(<16 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>), [[ADD2]](<4 x s32>), [[ADD3]](<4 x s32>)
- ; AVX1: %zmm0 = COPY [[MV]](<16 x s32>)
+ ; AVX1: $zmm0 = COPY [[MV]](<16 x s32>)
; AVX512F: [[ADD:%[0-9]+]]:_(<16 x s32>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512F: %zmm0 = COPY [[ADD]](<16 x s32>)
+ ; AVX512F: $zmm0 = COPY [[ADD]](<16 x s32>)
; AVX512BW: [[ADD:%[0-9]+]]:_(<16 x s32>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512BW: %zmm0 = COPY [[ADD]](<16 x s32>)
+ ; AVX512BW: $zmm0 = COPY [[ADD]](<16 x s32>)
; ALL: RET 0
%0(<16 x s32>) = IMPLICIT_DEF
%1(<16 x s32>) = IMPLICIT_DEF
%2(<16 x s32>) = G_ADD %0, %1
- %zmm0 = COPY %2
+ $zmm0 = COPY %2
RET 0
...
@@ -156,7 +156,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_add_v8i64
; ALL: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
@@ -168,16 +168,16 @@
; AVX1: [[ADD2:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV2]], [[UV6]]
; AVX1: [[ADD3:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV3]], [[UV7]]
; AVX1: [[MV:%[0-9]+]]:_(<8 x s64>) = G_MERGE_VALUES [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>), [[ADD2]](<2 x s64>), [[ADD3]](<2 x s64>)
- ; AVX1: %zmm0 = COPY [[MV]](<8 x s64>)
+ ; AVX1: $zmm0 = COPY [[MV]](<8 x s64>)
; AVX512F: [[ADD:%[0-9]+]]:_(<8 x s64>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512F: %zmm0 = COPY [[ADD]](<8 x s64>)
+ ; AVX512F: $zmm0 = COPY [[ADD]](<8 x s64>)
; AVX512BW: [[ADD:%[0-9]+]]:_(<8 x s64>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512BW: %zmm0 = COPY [[ADD]](<8 x s64>)
+ ; AVX512BW: $zmm0 = COPY [[ADD]](<8 x s64>)
; ALL: RET 0
%0(<8 x s64>) = IMPLICIT_DEF
%1(<8 x s64>) = IMPLICIT_DEF
%2(<8 x s64>) = G_ADD %0, %1
- %zmm0 = COPY %2
+ $zmm0 = COPY %2
RET 0
...
@@ -200,13 +200,13 @@
#
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1, %ymm2, %ymm3
+ liveins: $ymm0, $ymm1, $ymm2, $ymm3
; ALL-LABEL: name: test_add_v64i8_2
- ; ALL: [[COPY:%[0-9]+]]:_(<32 x s8>) = COPY %ymm0
- ; ALL: [[COPY1:%[0-9]+]]:_(<32 x s8>) = COPY %ymm1
- ; ALL: [[COPY2:%[0-9]+]]:_(<32 x s8>) = COPY %ymm2
- ; ALL: [[COPY3:%[0-9]+]]:_(<32 x s8>) = COPY %ymm3
+ ; ALL: [[COPY:%[0-9]+]]:_(<32 x s8>) = COPY $ymm0
+ ; ALL: [[COPY1:%[0-9]+]]:_(<32 x s8>) = COPY $ymm1
+ ; ALL: [[COPY2:%[0-9]+]]:_(<32 x s8>) = COPY $ymm2
+ ; ALL: [[COPY3:%[0-9]+]]:_(<32 x s8>) = COPY $ymm3
; AVX1: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY]](<32 x s8>)
; AVX1: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY1]](<32 x s8>)
; AVX1: [[UV4:%[0-9]+]]:_(<16 x s8>), [[UV5:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY2]](<32 x s8>)
@@ -217,29 +217,29 @@
; AVX1: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]]
; AVX1: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
; AVX1: [[MV1:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>)
- ; AVX1: %ymm0 = COPY [[MV]](<32 x s8>)
- ; AVX1: %ymm1 = COPY [[MV1]](<32 x s8>)
+ ; AVX1: $ymm0 = COPY [[MV]](<32 x s8>)
+ ; AVX1: $ymm1 = COPY [[MV1]](<32 x s8>)
; AVX512F: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[COPY]], [[COPY2]]
; AVX512F: [[ADD1:%[0-9]+]]:_(<32 x s8>) = G_ADD [[COPY1]], [[COPY3]]
- ; AVX512F: %ymm0 = COPY [[ADD]](<32 x s8>)
- ; AVX512F: %ymm1 = COPY [[ADD1]](<32 x s8>)
+ ; AVX512F: $ymm0 = COPY [[ADD]](<32 x s8>)
+ ; AVX512F: $ymm1 = COPY [[ADD1]](<32 x s8>)
; AVX512BW: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[COPY]](<32 x s8>), [[COPY1]](<32 x s8>)
; AVX512BW: [[MV1:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[COPY2]](<32 x s8>), [[COPY3]](<32 x s8>)
; AVX512BW: [[ADD:%[0-9]+]]:_(<64 x s8>) = G_ADD [[MV]], [[MV1]]
; AVX512BW: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[ADD]](<64 x s8>)
- ; AVX512BW: %ymm0 = COPY [[UV]](<32 x s8>)
- ; AVX512BW: %ymm1 = COPY [[UV1]](<32 x s8>)
- ; ALL: RET 0, implicit %ymm0, implicit %ymm1
- %2(<32 x s8>) = COPY %ymm0
- %3(<32 x s8>) = COPY %ymm1
- %4(<32 x s8>) = COPY %ymm2
- %5(<32 x s8>) = COPY %ymm3
+ ; AVX512BW: $ymm0 = COPY [[UV]](<32 x s8>)
+ ; AVX512BW: $ymm1 = COPY [[UV1]](<32 x s8>)
+ ; ALL: RET 0, implicit $ymm0, implicit $ymm1
+ %2(<32 x s8>) = COPY $ymm0
+ %3(<32 x s8>) = COPY $ymm1
+ %4(<32 x s8>) = COPY $ymm2
+ %5(<32 x s8>) = COPY $ymm3
%0(<64 x s8>) = G_MERGE_VALUES %2(<32 x s8>), %3(<32 x s8>)
%1(<64 x s8>) = G_MERGE_VALUES %4(<32 x s8>), %5(<32 x s8>)
%6(<64 x s8>) = G_ADD %0, %1
%7(<32 x s8>), %8(<32 x s8>) = G_UNMERGE_VALUES %6(<64 x s8>)
- %ymm0 = COPY %7(<32 x s8>)
- %ymm1 = COPY %8(<32 x s8>)
- RET 0, implicit %ymm0, implicit %ymm1
+ $ymm0 = COPY %7(<32 x s8>)
+ $ymm1 = COPY %8(<32 x s8>)
+ RET 0, implicit $ymm0, implicit $ymm1
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-add.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-add.mir
index 9d50fc4..9be876fa 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-add.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-add.mir
@@ -18,7 +18,7 @@
- { id: 0, class: _, preferred-register: '' }
- { id: 1, class: _, preferred-register: '' }
- { id: 2, class: _, preferred-register: '' }
-# CHECK: %0(s32) = COPY %edx
+# CHECK: %0(s32) = COPY $edx
# CHECK-NEXT: %3(s8) = G_TRUNC %0(s32)
# CHECK-NEXT: %4(s8) = G_TRUNC %0(s32)
# CHECK-NEXT: %5(s8) = G_ADD %3, %4
@@ -27,26 +27,26 @@
bb.1 (%ir-block.0):
; X64-LABEL: name: test_add_i1
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
; X64: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; X64: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; X64: [[ADD:%[0-9]+]]:_(s8) = G_ADD [[TRUNC]], [[TRUNC1]]
; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ADD]](s8)
- ; X64: %eax = COPY [[ANYEXT]](s32)
+ ; X64: $eax = COPY [[ANYEXT]](s32)
; X64: RET 0
; X32-LABEL: name: test_add_i1
- ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+ ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
; X32: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; X32: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; X32: [[ADD:%[0-9]+]]:_(s8) = G_ADD [[TRUNC]], [[TRUNC1]]
; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ADD]](s8)
- ; X32: %eax = COPY [[ANYEXT]](s32)
+ ; X32: $eax = COPY [[ANYEXT]](s32)
; X32: RET 0
- %0(s32) = COPY %edx
+ %0(s32) = COPY $edx
%1(s1) = G_TRUNC %0(s32)
%2(s1) = G_ADD %1, %1
%3:_(s32) = G_ANYEXT %2
- %eax = COPY %3
+ $eax = COPY %3
RET 0
...
---
@@ -64,18 +64,18 @@
; X64: [[DEF:%[0-9]+]]:_(s32) = IMPLICIT_DEF
; X64: [[DEF1:%[0-9]+]]:_(s32) = IMPLICIT_DEF
; X64: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[DEF]], [[DEF1]]
- ; X64: %eax = COPY [[ADD]](s32)
+ ; X64: $eax = COPY [[ADD]](s32)
; X64: RET 0
; X32-LABEL: name: test_add_i32
; X32: [[DEF:%[0-9]+]]:_(s32) = IMPLICIT_DEF
; X32: [[DEF1:%[0-9]+]]:_(s32) = IMPLICIT_DEF
; X32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[DEF]], [[DEF1]]
- ; X32: %eax = COPY [[ADD]](s32)
+ ; X32: $eax = COPY [[ADD]](s32)
; X32: RET 0
%0(s32) = IMPLICIT_DEF
%1(s32) = IMPLICIT_DEF
%2(s32) = G_ADD %0, %1
- %eax = COPY %2
+ $eax = COPY %2
RET 0
...
@@ -94,7 +94,7 @@
; X64: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X64: [[DEF1:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X64: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[DEF]], [[DEF1]]
- ; X64: %rax = COPY [[ADD]](s64)
+ ; X64: $rax = COPY [[ADD]](s64)
; X64: RET 0
; X32-LABEL: name: test_add_i64
; X32: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
@@ -106,12 +106,12 @@
; X32: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV]], [[UV2]], [[TRUNC]]
; X32: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDE1]]
; X32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDE]](s32), [[UADDE2]](s32)
- ; X32: %rax = COPY [[MV]](s64)
+ ; X32: $rax = COPY [[MV]](s64)
; X32: RET 0
%0(s64) = IMPLICIT_DEF
%1(s64) = IMPLICIT_DEF
%2(s64) = G_ADD %0, %1
- %rax = COPY %2
+ $rax = COPY %2
RET 0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir
index 44ccdd8..06db701 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir
@@ -41,18 +41,18 @@
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_and_i1
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s8) = G_AND [[TRUNC]], [[TRUNC1]]
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s8)
- ; CHECK: %eax = COPY [[ANYEXT]](s32)
+ ; CHECK: $eax = COPY [[ANYEXT]](s32)
; CHECK: RET 0
- %0(s32) = COPY %edx
+ %0(s32) = COPY $edx
%1(s1) = G_TRUNC %0(s32)
%2(s1) = G_AND %1, %1
%3:_(s32) = G_ANYEXT %2
- %eax = COPY %3
+ $eax = COPY %3
RET 0
...
---
@@ -72,12 +72,12 @@
; CHECK-LABEL: name: test_and_i8
; CHECK: [[DEF:%[0-9]+]]:_(s8) = IMPLICIT_DEF
; CHECK: [[AND:%[0-9]+]]:_(s8) = G_AND [[DEF]], [[DEF]]
- ; CHECK: %al = COPY [[AND]](s8)
- ; CHECK: RET 0, implicit %al
+ ; CHECK: $al = COPY [[AND]](s8)
+ ; CHECK: RET 0, implicit $al
%0(s8) = IMPLICIT_DEF
%1(s8) = G_AND %0, %0
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -97,12 +97,12 @@
; CHECK-LABEL: name: test_and_i16
; CHECK: [[DEF:%[0-9]+]]:_(s16) = IMPLICIT_DEF
; CHECK: [[AND:%[0-9]+]]:_(s16) = G_AND [[DEF]], [[DEF]]
- ; CHECK: %ax = COPY [[AND]](s16)
- ; CHECK: RET 0, implicit %ax
+ ; CHECK: $ax = COPY [[AND]](s16)
+ ; CHECK: RET 0, implicit $ax
%0(s16) = IMPLICIT_DEF
%1(s16) = G_AND %0, %0
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -122,12 +122,12 @@
; CHECK-LABEL: name: test_and_i32
; CHECK: [[DEF:%[0-9]+]]:_(s32) = IMPLICIT_DEF
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[DEF]], [[DEF]]
- ; CHECK: %eax = COPY [[AND]](s32)
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: $eax = COPY [[AND]](s32)
+ ; CHECK: RET 0, implicit $eax
%0(s32) = IMPLICIT_DEF
%1(s32) = G_AND %0, %0
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -147,11 +147,11 @@
; CHECK-LABEL: name: test_and_i64
; CHECK: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[DEF]], [[DEF]]
- ; CHECK: %rax = COPY [[AND]](s64)
- ; CHECK: RET 0, implicit %rax
+ ; CHECK: $rax = COPY [[AND]](s64)
+ ; CHECK: RET 0, implicit $rax
%0(s64) = IMPLICIT_DEF
%1(s64) = G_AND %0, %0
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-brcond.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-brcond.mir
index 946e738..d645ad3 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-brcond.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-brcond.mir
@@ -30,17 +30,17 @@
# ALL-NEXT: G_BRCOND %1(s1), %[[TRUE:bb.[0-9]+]]
# ALL-NEXT: G_BR %[[FALSE:bb.[0-9]+]]
# ALL: [[TRUE]].{{[a-zA-Z0-9.]+}}:
-# ALL-NEXT: %eax = COPY %2(s32)
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %2(s32)
+# ALL-NEXT: RET 0, implicit $eax
# ALL: [[FALSE]].{{[a-zA-Z0-9.]+}}:
-# ALL-NEXT: %eax = COPY %3(s32)
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %3(s32)
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%2(s32) = G_CONSTANT i32 0
%3(s32) = G_CONSTANT i32 1
%1(s1) = G_TRUNC %0(s32)
@@ -48,11 +48,11 @@
G_BR %bb.3
bb.2.if.then:
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
bb.3.if.else:
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-cmp.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-cmp.mir
index c3e7b77..e936672 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-cmp.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-cmp.mir
@@ -45,21 +45,21 @@
- { id: 3, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_cmp_i8
- ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
- ; CHECK: [[COPY1:%[0-9]+]]:_(s8) = COPY %sil
+ ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s8) = COPY $sil
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s8), [[COPY1]]
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
- ; CHECK: %eax = COPY [[ZEXT]](s32)
- ; CHECK: RET 0, implicit %eax
- %0(s8) = COPY %dil
- %1(s8) = COPY %sil
+ ; CHECK: $eax = COPY [[ZEXT]](s32)
+ ; CHECK: RET 0, implicit $eax
+ %0(s8) = COPY $dil
+ %1(s8) = COPY $sil
%2(s1) = G_ICMP intpred(ult), %0(s8), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -74,21 +74,21 @@
- { id: 3, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_cmp_i16
- ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY %di
- ; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY %si
+ ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $di
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY $si
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s16), [[COPY1]]
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
- ; CHECK: %eax = COPY [[ZEXT]](s32)
- ; CHECK: RET 0, implicit %eax
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ ; CHECK: $eax = COPY [[ZEXT]](s32)
+ ; CHECK: RET 0, implicit $eax
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s1) = G_ICMP intpred(ult), %0(s16), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -103,21 +103,21 @@
- { id: 3, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_cmp_i32
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s32), [[COPY1]]
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
- ; CHECK: %eax = COPY [[ZEXT]](s32)
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: $eax = COPY [[ZEXT]](s32)
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(ult), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -132,21 +132,21 @@
- { id: 3, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; CHECK-LABEL: name: test_cmp_i64
- ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %rdi
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %rsi
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY1]]
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
- ; CHECK: %eax = COPY [[ZEXT]](s32)
- ; CHECK: RET 0, implicit %eax
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ ; CHECK: $eax = COPY [[ZEXT]](s32)
+ ; CHECK: RET 0, implicit $eax
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s1) = G_ICMP intpred(ult), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -161,20 +161,20 @@
- { id: 3, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; CHECK-LABEL: name: test_cmp_p0
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %rdi
- ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY %rsi
+ ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $rsi
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](p0), [[COPY1]]
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
- ; CHECK: %eax = COPY [[ZEXT]](s32)
- ; CHECK: RET 0, implicit %eax
- %0(p0) = COPY %rdi
- %1(p0) = COPY %rsi
+ ; CHECK: $eax = COPY [[ZEXT]](s32)
+ ; CHECK: RET 0, implicit $eax
+ %0(p0) = COPY $rdi
+ %1(p0) = COPY $rsi
%2(s1) = G_ICMP intpred(ult), %0(p0), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-constant.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-constant.mir
index 1697afb..7322174 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-constant.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-constant.mir
@@ -20,46 +20,46 @@
; X32-LABEL: name: test_constant
; X32: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s8)
- ; X32: %eax = COPY [[ANYEXT]](s32)
+ ; X32: $eax = COPY [[ANYEXT]](s32)
; X32: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 8
- ; X32: %al = COPY [[C1]](s8)
+ ; X32: $al = COPY [[C1]](s8)
; X32: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 16
- ; X32: %ax = COPY [[C2]](s16)
+ ; X32: $ax = COPY [[C2]](s16)
; X32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; X32: %eax = COPY [[C3]](s32)
+ ; X32: $eax = COPY [[C3]](s32)
; X32: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; X32: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C4]](s32), [[C5]](s32)
- ; X32: %rax = COPY [[MV]](s64)
+ ; X32: $rax = COPY [[MV]](s64)
; X32: RET 0
; X64-LABEL: name: test_constant
; X64: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s8)
- ; X64: %eax = COPY [[ANYEXT]](s32)
+ ; X64: $eax = COPY [[ANYEXT]](s32)
; X64: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 8
- ; X64: %al = COPY [[C1]](s8)
+ ; X64: $al = COPY [[C1]](s8)
; X64: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 16
- ; X64: %ax = COPY [[C2]](s16)
+ ; X64: $ax = COPY [[C2]](s16)
; X64: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; X64: %eax = COPY [[C3]](s32)
+ ; X64: $eax = COPY [[C3]](s32)
; X64: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; X64: %rax = COPY [[C4]](s64)
+ ; X64: $rax = COPY [[C4]](s64)
; X64: RET 0
%0(s1) = G_CONSTANT i1 1
%5:_(s32) = G_ANYEXT %0
- %eax = COPY %5
+ $eax = COPY %5
%1(s8) = G_CONSTANT i8 8
- %al = COPY %1
+ $al = COPY %1
%2(s16) = G_CONSTANT i16 16
- %ax = COPY %2
+ $ax = COPY %2
%3(s32) = G_CONSTANT i32 32
- %eax = COPY %3
+ $eax = COPY %3
%4(s64) = G_CONSTANT i64 64
- %rax = COPY %4
+ $rax = COPY %4
RET 0
...
@@ -73,17 +73,17 @@
; X32-LABEL: name: test_fconstant
; X32: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
- ; X32: %eax = COPY [[C]](s32)
+ ; X32: $eax = COPY [[C]](s32)
; X32: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
- ; X32: %rax = COPY [[C1]](s64)
+ ; X32: $rax = COPY [[C1]](s64)
; X64-LABEL: name: test_fconstant
; X64: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
- ; X64: %eax = COPY [[C]](s32)
+ ; X64: $eax = COPY [[C]](s32)
; X64: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
- ; X64: %rax = COPY [[C1]](s64)
+ ; X64: $rax = COPY [[C1]](s64)
%0(s32) = G_FCONSTANT float 1.0
- %eax = COPY %0
+ $eax = COPY %0
%1(s64) = G_FCONSTANT double 2.0
- %rax = COPY %1
+ $rax = COPY %1
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir
index eb8c1da..f263da9 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir
@@ -73,19 +73,19 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_sext_i1
- ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s8)
; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[TRUNC]](s1)
- ; CHECK: %rax = COPY [[SEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s8) = COPY %dil
+ ; CHECK: $rax = COPY [[SEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s8) = COPY $dil
%1(s1) = G_TRUNC %0(s8)
%2(s64) = G_SEXT %1(s1)
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
---
@@ -98,17 +98,17 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_sext_i8
- ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s8)
- ; CHECK: %rax = COPY [[SEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s8) = COPY %dil
+ ; CHECK: $rax = COPY [[SEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s8) = COPY $dil
%1(s64) = G_SEXT %0(s8)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -121,17 +121,17 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_sext_i16
- ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s16)
- ; CHECK: %rax = COPY [[SEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s16) = COPY %di
+ ; CHECK: $rax = COPY [[SEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s16) = COPY $di
%1(s64) = G_SEXT %0(s16)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -144,17 +144,17 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_sext_i32
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s32)
- ; CHECK: %rax = COPY [[SEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s32) = COPY %edi
+ ; CHECK: $rax = COPY [[SEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s32) = COPY $edi
%1(s64) = G_SEXT %0(s32)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -168,20 +168,20 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_zext_i1
- ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s8)
; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C]]
- ; CHECK: %rax = COPY [[AND]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s8) = COPY %dil
+ ; CHECK: $rax = COPY [[AND]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s8) = COPY $dil
%1(s1) = G_TRUNC %0(s8)
%2(s64) = G_ZEXT %1(s1)
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
---
@@ -194,17 +194,17 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_zext_i8
- ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s8)
- ; CHECK: %rax = COPY [[ZEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s8) = COPY %dil
+ ; CHECK: $rax = COPY [[ZEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s8) = COPY $dil
%1(s64) = G_ZEXT %0(s8)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -217,17 +217,17 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_zext_i16
- ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s16)
- ; CHECK: %rax = COPY [[ZEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s16) = COPY %di
+ ; CHECK: $rax = COPY [[ZEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s16) = COPY $di
%1(s64) = G_ZEXT %0(s16)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -240,17 +240,17 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_zext_i32
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s32)
- ; CHECK: %rax = COPY [[ZEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s32) = COPY %edi
+ ; CHECK: $rax = COPY [[ZEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s32) = COPY $edi
%1(s64) = G_ZEXT %0(s32)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -264,18 +264,18 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_anyext_i1
- ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s8)
- ; CHECK: %rax = COPY [[ANYEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s8) = COPY %dil
+ ; CHECK: $rax = COPY [[ANYEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s8) = COPY $dil
%1(s1) = G_TRUNC %0(s8)
%2(s64) = G_ANYEXT %1(s1)
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
---
@@ -288,17 +288,17 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_anyext_i8
- ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s8)
- ; CHECK: %rax = COPY [[ANYEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s8) = COPY %dil
+ ; CHECK: $rax = COPY [[ANYEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s8) = COPY $dil
%1(s64) = G_ANYEXT %0(s8)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -311,17 +311,17 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_anyext_i16
- ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s16)
- ; CHECK: %rax = COPY [[ANYEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s16) = COPY %di
+ ; CHECK: $rax = COPY [[ANYEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s16) = COPY $di
%1(s64) = G_ANYEXT %0(s16)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -334,17 +334,17 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_anyext_i32
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s32)
- ; CHECK: %rax = COPY [[ANYEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s32) = COPY %edi
+ ; CHECK: $rax = COPY [[ANYEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s32) = COPY $edi
%1(s64) = G_ANYEXT %0(s32)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-ext.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-ext.mir
index 9a062a7..c7daa03 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-ext.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-ext.mir
@@ -101,27 +101,27 @@
- { id: 2, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_zext_i1toi8
- ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X32: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
; X32: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; X32: [[AND:%[0-9]+]]:_(s8) = G_AND [[TRUNC]], [[C]]
- ; X32: %al = COPY [[AND]](s8)
- ; X32: RET 0, implicit %al
+ ; X32: $al = COPY [[AND]](s8)
+ ; X32: RET 0, implicit $al
; X64-LABEL: name: test_zext_i1toi8
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X64: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
; X64: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; X64: [[AND:%[0-9]+]]:_(s8) = G_AND [[TRUNC]], [[C]]
- ; X64: %al = COPY [[AND]](s8)
- ; X64: RET 0, implicit %al
- %1:_(s32) = COPY %edi
+ ; X64: $al = COPY [[AND]](s8)
+ ; X64: RET 0, implicit $al
+ %1:_(s32) = COPY $edi
%0:_(s1) = G_TRUNC %1(s32)
%2:_(s8) = G_ZEXT %0(s1)
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -134,27 +134,27 @@
- { id: 1, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_zext_i1toi16
- ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X32: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
; X32: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; X32: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
- ; X32: %ax = COPY [[AND]](s16)
- ; X32: RET 0, implicit %ax
+ ; X32: $ax = COPY [[AND]](s16)
+ ; X32: RET 0, implicit $ax
; X64-LABEL: name: test_zext_i1toi16
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X64: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
; X64: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; X64: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
- ; X64: %ax = COPY [[AND]](s16)
- ; X64: RET 0, implicit %ax
- %1:_(s32) = COPY %edi
+ ; X64: $ax = COPY [[AND]](s16)
+ ; X64: RET 0, implicit $ax
+ %1:_(s32) = COPY $edi
%0:_(s1) = G_TRUNC %1(s32)
%2:_(s16) = G_ZEXT %0(s1)
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -168,27 +168,27 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_zext_i1
- ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8)
; X32: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C]]
- ; X32: %eax = COPY [[AND]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[AND]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_zext_i1
- ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8)
; X64: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C]]
- ; X64: %eax = COPY [[AND]](s32)
- ; X64: RET 0, implicit %eax
- %0(s8) = COPY %dil
+ ; X64: $eax = COPY [[AND]](s32)
+ ; X64: RET 0, implicit $eax
+ %0(s8) = COPY $dil
%1(s1) = G_TRUNC %0(s8)
%2(s32) = G_ZEXT %1(s1)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -201,22 +201,22 @@
- { id: 1, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_zext_i8toi16
- ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X32: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[COPY]](s8)
- ; X32: %ax = COPY [[ZEXT]](s16)
- ; X32: RET 0, implicit %ax
+ ; X32: $ax = COPY [[ZEXT]](s16)
+ ; X32: RET 0, implicit $ax
; X64-LABEL: name: test_zext_i8toi16
- ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X64: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[COPY]](s8)
- ; X64: %ax = COPY [[ZEXT]](s16)
- ; X64: RET 0, implicit %ax
- %0(s8) = COPY %dil
+ ; X64: $ax = COPY [[ZEXT]](s16)
+ ; X64: RET 0, implicit $ax
+ %0(s8) = COPY $dil
%1(s16) = G_ZEXT %0(s8)
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -229,22 +229,22 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_zext_i8
- ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X32: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY]](s8)
- ; X32: %eax = COPY [[ZEXT]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[ZEXT]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_zext_i8
- ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X64: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY]](s8)
- ; X64: %eax = COPY [[ZEXT]](s32)
- ; X64: RET 0, implicit %eax
- %0(s8) = COPY %dil
+ ; X64: $eax = COPY [[ZEXT]](s32)
+ ; X64: RET 0, implicit $eax
+ %0(s8) = COPY $dil
%1(s32) = G_ZEXT %0(s8)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -257,22 +257,22 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_zext_i16
- ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; X32: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY]](s16)
- ; X32: %eax = COPY [[ZEXT]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[ZEXT]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_zext_i16
- ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; X64: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY]](s16)
- ; X64: %eax = COPY [[ZEXT]](s32)
- ; X64: RET 0, implicit %eax
- %0(s16) = COPY %di
+ ; X64: $eax = COPY [[ZEXT]](s32)
+ ; X64: RET 0, implicit $eax
+ %0(s16) = COPY $di
%1(s32) = G_ZEXT %0(s16)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -285,20 +285,20 @@
- { id: 1, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_sext_i1toi8
; X32: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
- ; X32: %al = COPY [[DEF]](s8)
- ; X32: RET 0, implicit %al
+ ; X32: $al = COPY [[DEF]](s8)
+ ; X32: RET 0, implicit $al
; X64-LABEL: name: test_sext_i1toi8
; X64: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
- ; X64: %al = COPY [[DEF]](s8)
- ; X64: RET 0, implicit %al
+ ; X64: $al = COPY [[DEF]](s8)
+ ; X64: RET 0, implicit $al
%0(s1) = G_IMPLICIT_DEF
%1(s8) = G_SEXT %0(s1)
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -311,20 +311,20 @@
- { id: 1, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_sext_i1toi16
; X32: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
- ; X32: %ax = COPY [[DEF]](s16)
- ; X32: RET 0, implicit %ax
+ ; X32: $ax = COPY [[DEF]](s16)
+ ; X32: RET 0, implicit $ax
; X64-LABEL: name: test_sext_i1toi16
; X64: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
- ; X64: %ax = COPY [[DEF]](s16)
- ; X64: RET 0, implicit %ax
+ ; X64: $ax = COPY [[DEF]](s16)
+ ; X64: RET 0, implicit $ax
%0(s1) = G_IMPLICIT_DEF
%1(s16) = G_SEXT %0(s1)
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -338,20 +338,20 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_sext_i1
; X32: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
- ; X32: %eax = COPY [[DEF]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[DEF]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_sext_i1
; X64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
- ; X64: %eax = COPY [[DEF]](s32)
- ; X64: RET 0, implicit %eax
+ ; X64: $eax = COPY [[DEF]](s32)
+ ; X64: RET 0, implicit $eax
%0(s1) = G_IMPLICIT_DEF
%2(s32) = G_SEXT %0(s1)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -364,22 +364,22 @@
- { id: 1, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_sext_i8toi16
- ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X32: [[SEXT:%[0-9]+]]:_(s16) = G_SEXT [[COPY]](s8)
- ; X32: %ax = COPY [[SEXT]](s16)
- ; X32: RET 0, implicit %ax
+ ; X32: $ax = COPY [[SEXT]](s16)
+ ; X32: RET 0, implicit $ax
; X64-LABEL: name: test_sext_i8toi16
- ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X64: [[SEXT:%[0-9]+]]:_(s16) = G_SEXT [[COPY]](s8)
- ; X64: %ax = COPY [[SEXT]](s16)
- ; X64: RET 0, implicit %ax
- %0(s8) = COPY %dil
+ ; X64: $ax = COPY [[SEXT]](s16)
+ ; X64: RET 0, implicit $ax
+ %0(s8) = COPY $dil
%1(s16) = G_SEXT %0(s8)
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -392,22 +392,22 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_sext_i8
- ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X32: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s8)
- ; X32: %eax = COPY [[SEXT]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[SEXT]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_sext_i8
- ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X64: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s8)
- ; X64: %eax = COPY [[SEXT]](s32)
- ; X64: RET 0, implicit %eax
- %0(s8) = COPY %dil
+ ; X64: $eax = COPY [[SEXT]](s32)
+ ; X64: RET 0, implicit $eax
+ %0(s8) = COPY $dil
%1(s32) = G_SEXT %0(s8)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -420,22 +420,22 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_sext_i16
- ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; X32: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s16)
- ; X32: %eax = COPY [[SEXT]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[SEXT]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_sext_i16
- ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; X64: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s16)
- ; X64: %eax = COPY [[SEXT]](s32)
- ; X64: RET 0, implicit %eax
- %0(s16) = COPY %di
+ ; X64: $eax = COPY [[SEXT]](s32)
+ ; X64: RET 0, implicit $eax
+ %0(s16) = COPY $di
%1(s32) = G_SEXT %0(s16)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -449,23 +449,23 @@
- { id: 2, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_anyext_i1toi8
- ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X32: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; X32: %al = COPY [[TRUNC]](s8)
- ; X32: RET 0, implicit %al
+ ; X32: $al = COPY [[TRUNC]](s8)
+ ; X32: RET 0, implicit $al
; X64-LABEL: name: test_anyext_i1toi8
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X64: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; X64: %al = COPY [[TRUNC]](s8)
- ; X64: RET 0, implicit %al
- %0(s32) = COPY %edi
+ ; X64: $al = COPY [[TRUNC]](s8)
+ ; X64: RET 0, implicit $al
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s8) = G_ANYEXT %1(s1)
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -479,23 +479,23 @@
- { id: 2, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_anyext_i1toi16
- ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X32: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; X32: %ax = COPY [[TRUNC]](s16)
- ; X32: RET 0, implicit %ax
+ ; X32: $ax = COPY [[TRUNC]](s16)
+ ; X32: RET 0, implicit $ax
; X64-LABEL: name: test_anyext_i1toi16
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X64: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; X64: %ax = COPY [[TRUNC]](s16)
- ; X64: RET 0, implicit %ax
- %0(s32) = COPY %edi
+ ; X64: $ax = COPY [[TRUNC]](s16)
+ ; X64: RET 0, implicit $ax
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s16) = G_ANYEXT %1(s1)
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -509,23 +509,23 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_anyext_i1
- ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8)
- ; X32: %eax = COPY [[ANYEXT]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[ANYEXT]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_anyext_i1
- ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8)
- ; X64: %eax = COPY [[ANYEXT]](s32)
- ; X64: RET 0, implicit %eax
- %0(s8) = COPY %dil
+ ; X64: $eax = COPY [[ANYEXT]](s32)
+ ; X64: RET 0, implicit $eax
+ %0(s8) = COPY $dil
%1(s1) = G_TRUNC %0(s8)
%2(s32) = G_ANYEXT %1(s1)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -538,22 +538,22 @@
- { id: 1, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_anyext_i8toi16
- ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X32: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY]](s8)
- ; X32: %ax = COPY [[ANYEXT]](s16)
- ; X32: RET 0, implicit %ax
+ ; X32: $ax = COPY [[ANYEXT]](s16)
+ ; X32: RET 0, implicit $ax
; X64-LABEL: name: test_anyext_i8toi16
- ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X64: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY]](s8)
- ; X64: %ax = COPY [[ANYEXT]](s16)
- ; X64: RET 0, implicit %ax
- %0(s8) = COPY %dil
+ ; X64: $ax = COPY [[ANYEXT]](s16)
+ ; X64: RET 0, implicit $ax
+ %0(s8) = COPY $dil
%1(s16) = G_ANYEXT %0(s8)
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -566,22 +566,22 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_anyext_i8
- ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8)
- ; X32: %eax = COPY [[ANYEXT]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[ANYEXT]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_anyext_i8
- ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8)
- ; X64: %eax = COPY [[ANYEXT]](s32)
- ; X64: RET 0, implicit %eax
- %0(s8) = COPY %dil
+ ; X64: $eax = COPY [[ANYEXT]](s32)
+ ; X64: RET 0, implicit $eax
+ %0(s8) = COPY $dil
%1(s32) = G_ANYEXT %0(s8)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -594,21 +594,21 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_anyext_i16
- ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s16)
- ; X32: %eax = COPY [[ANYEXT]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[ANYEXT]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_anyext_i16
- ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s16)
- ; X64: %eax = COPY [[ANYEXT]](s32)
- ; X64: RET 0, implicit %eax
- %0(s16) = COPY %di
+ ; X64: $eax = COPY [[ANYEXT]](s32)
+ ; X64: RET 0, implicit $eax
+ %0(s16) = COPY $di
%1(s32) = G_ANYEXT %0(s16)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir
index 407c425..19212d2 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir
@@ -28,19 +28,19 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fadd_float
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
; CHECK: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[FADD]](s32)
- ; CHECK: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[FADD]](s32)
+ ; CHECK: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FADD %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -58,18 +58,18 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fadd_double
- ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
; CHECK: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[FADD]](s64)
- ; CHECK: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[FADD]](s64)
+ ; CHECK: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FADD %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir
index 128ab9b..4c7a173 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir
@@ -28,19 +28,19 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fdiv_float
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
; CHECK: [[FDIV:%[0-9]+]]:_(s32) = G_FDIV [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[FDIV]](s32)
- ; CHECK: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[FDIV]](s32)
+ ; CHECK: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FDIV %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -58,18 +58,18 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fdiv_double
- ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
; CHECK: [[FDIV:%[0-9]+]]:_(s64) = G_FDIV [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[FDIV]](s64)
- ; CHECK: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[FDIV]](s64)
+ ; CHECK: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FDIV %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir
index 73e04d0..e7d9ad2 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir
@@ -28,19 +28,19 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fmul_float
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[FMUL]](s32)
- ; CHECK: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[FMUL]](s32)
+ ; CHECK: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FMUL %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -58,18 +58,18 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fmul_double
- ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
; CHECK: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[FMUL]](s64)
- ; CHECK: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[FMUL]](s64)
+ ; CHECK: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FMUL %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir
index 25d1fbc..1d9097f 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir
@@ -19,16 +19,16 @@
- { id: 1, class: _, preferred-register: '' }
body: |
bb.1.entry:
- liveins: %xmm0
+ liveins: $xmm0
; ALL-LABEL: name: test
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
; ALL: [[FPEXT:%[0-9]+]]:_(s64) = G_FPEXT [[COPY]](s32)
- ; ALL: %xmm0 = COPY [[FPEXT]](s64)
- ; ALL: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
+ ; ALL: $xmm0 = COPY [[FPEXT]](s64)
+ ; ALL: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
%1(s64) = G_FPEXT %0(s32)
- %xmm0 = COPY %1(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s64)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir
index 253d1fb..f78ca69 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir
@@ -28,19 +28,19 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fsub_float
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
; CHECK: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[FSUB]](s32)
- ; CHECK: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[FSUB]](s32)
+ ; CHECK: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FSUB %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -58,18 +58,18 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fsub_double
- ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
; CHECK: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[FSUB]](s64)
- ; CHECK: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[FSUB]](s64)
+ ; CHECK: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FSUB %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-insert-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-insert-vec256.mir
index 613f2a7..f9c97f8 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-insert-vec256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-insert-vec256.mir
@@ -15,19 +15,19 @@
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<8 x s32>) = COPY %ymm0
-# ALL-NEXT: %1:_(<4 x s32>) = COPY %xmm1
+# ALL: %0:_(<8 x s32>) = COPY $ymm0
+# ALL-NEXT: %1:_(<4 x s32>) = COPY $xmm1
# ALL-NEXT: %2:_(<8 x s32>) = G_INSERT %0, %1(<4 x s32>), 0
-# ALL-NEXT: %ymm0 = COPY %2(<8 x s32>)
-# ALL-NEXT: RET 0, implicit %ymm0
+# ALL-NEXT: $ymm0 = COPY %2(<8 x s32>)
+# ALL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<8 x s32>) = COPY %ymm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<8 x s32>) = COPY $ymm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 0
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-insert-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-insert-vec512.mir
index d9fb35e..a1262e8 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-insert-vec512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-insert-vec512.mir
@@ -21,19 +21,19 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %ymm1
+ liveins: $zmm0, $ymm1
; ALL-LABEL: name: test_insert_128
- ; ALL: [[COPY:%[0-9]+]]:_(<16 x s32>) = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
+ ; ALL: [[COPY:%[0-9]+]]:_(<16 x s32>) = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; ALL: [[INSERT:%[0-9]+]]:_(<16 x s32>) = G_INSERT [[COPY]], [[COPY1]](<4 x s32>), 0
- ; ALL: %zmm0 = COPY [[INSERT]](<16 x s32>)
- ; ALL: RET 0, implicit %ymm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<4 x s32>) = COPY %xmm1
+ ; ALL: $zmm0 = COPY [[INSERT]](<16 x s32>)
+ ; ALL: RET 0, implicit $ymm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 0
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -47,18 +47,18 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %ymm1
+ liveins: $zmm0, $ymm1
; ALL-LABEL: name: test_insert_256
- ; ALL: [[COPY:%[0-9]+]]:_(<16 x s32>) = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY %ymm1
+ ; ALL: [[COPY:%[0-9]+]]:_(<16 x s32>) = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $ymm1
; ALL: [[INSERT:%[0-9]+]]:_(<16 x s32>) = G_INSERT [[COPY]], [[COPY1]](<8 x s32>), 0
- ; ALL: %zmm0 = COPY [[INSERT]](<16 x s32>)
- ; ALL: RET 0, implicit %ymm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<8 x s32>) = COPY %ymm1
+ ; ALL: $zmm0 = COPY [[INSERT]](<16 x s32>)
+ ; ALL: RET 0, implicit $ymm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<8 x s32>) = COPY $ymm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 0
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir
index ee572a3..447532a 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir
@@ -30,7 +30,7 @@
- { id: 10, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; X64-LABEL: name: test_memop_s8tos32
; X64: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF
@@ -88,7 +88,7 @@
#
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; X64-LABEL: name: test_memop_s64
; X64: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir
index 6f84488..fec5710 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir
@@ -33,7 +33,7 @@
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_mul_i1
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[MUL:%[0-9]+]]:_(s8) = G_MUL [[TRUNC]], [[TRUNC1]]
@@ -43,7 +43,7 @@
; CHECK: [[AND:%[0-9]+]]:_(s8) = G_AND [[COPY1]], [[C]]
; CHECK: G_STORE [[AND]](s8), [[DEF]](p0) :: (store 1)
; CHECK: RET 0
- %0(s32) = COPY %edx
+ %0(s32) = COPY $edx
%1(s1) = G_TRUNC %0(s32)
%2(s1) = G_MUL %1, %1
%3:_(p0) = G_IMPLICIT_DEF
@@ -61,19 +61,19 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_mul_i16
- ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY %di
- ; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY %si
+ ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $di
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY $si
; CHECK: [[MUL:%[0-9]+]]:_(s16) = G_MUL [[COPY]], [[COPY1]]
- ; CHECK: %ax = COPY [[MUL]](s16)
- ; CHECK: RET 0, implicit %ax
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ ; CHECK: $ax = COPY [[MUL]](s16)
+ ; CHECK: RET 0, implicit $ax
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s16) = G_MUL %0, %1
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -87,19 +87,19 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_mul_i32
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
- ; CHECK: %eax = COPY [[MUL]](s32)
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: $eax = COPY [[MUL]](s32)
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_MUL %0, %1
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -113,18 +113,18 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; CHECK-LABEL: name: test_mul_i64
- ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %rdi
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %rsi
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi
; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
- ; CHECK: %rax = COPY [[MUL]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ ; CHECK: $rax = COPY [[MUL]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s64) = G_MUL %0, %1
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir
index f14b6eb..47564e5 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir
@@ -33,20 +33,20 @@
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<8 x s16>) = COPY %xmm0
-# ALL-NEXT: %1:_(<8 x s16>) = COPY %xmm1
+# ALL: %0:_(<8 x s16>) = COPY $xmm0
+# ALL-NEXT: %1:_(<8 x s16>) = COPY $xmm1
# ALL-NEXT: %2:_(<8 x s16>) = G_MUL %0, %1
-# ALL-NEXT: %xmm0 = COPY %2(<8 x s16>)
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %2(<8 x s16>)
+# ALL-NEXT: RET 0, implicit $xmm0
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<8 x s16>) = COPY %xmm0
- %1(<8 x s16>) = COPY %xmm1
+ %0(<8 x s16>) = COPY $xmm0
+ %1(<8 x s16>) = COPY $xmm1
%2(<8 x s16>) = G_MUL %0, %1
- %xmm0 = COPY %2(<8 x s16>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit $xmm0
...
---
@@ -63,20 +63,20 @@
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<4 x s32>) = COPY %xmm0
-# ALL-NEXT: %1:_(<4 x s32>) = COPY %xmm1
+# ALL: %0:_(<4 x s32>) = COPY $xmm0
+# ALL-NEXT: %1:_(<4 x s32>) = COPY $xmm1
# ALL-NEXT: %2:_(<4 x s32>) = G_MUL %0, %1
-# ALL-NEXT: %xmm0 = COPY %2(<4 x s32>)
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %2(<4 x s32>)
+# ALL-NEXT: RET 0, implicit $xmm0
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_MUL %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -93,19 +93,19 @@
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<2 x s64>) = COPY %xmm0
-# ALL-NEXT: %1:_(<2 x s64>) = COPY %xmm1
+# ALL: %0:_(<2 x s64>) = COPY $xmm0
+# ALL-NEXT: %1:_(<2 x s64>) = COPY $xmm1
# ALL-NEXT: %2:_(<2 x s64>) = G_MUL %0, %1
-# ALL-NEXT: %xmm0 = COPY %2(<2 x s64>)
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %2(<2 x s64>)
+# ALL-NEXT: RET 0, implicit $xmm0
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<2 x s64>) = COPY %xmm0
- %1(<2 x s64>) = COPY %xmm1
+ %0(<2 x s64>) = COPY $xmm0
+ %1(<2 x s64>) = COPY $xmm1
%2(<2 x s64>) = G_MUL %0, %1
- %xmm0 = COPY %2(<2 x s64>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<2 x s64>)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir
index b0921a9..5fb13cb 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir
@@ -33,20 +33,20 @@
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<16 x s16>) = COPY %ymm0
-# ALL-NEXT: %1:_(<16 x s16>) = COPY %ymm1
+# ALL: %0:_(<16 x s16>) = COPY $ymm0
+# ALL-NEXT: %1:_(<16 x s16>) = COPY $ymm1
# ALL-NEXT: %2:_(<16 x s16>) = G_MUL %0, %1
-# ALL-NEXT: %ymm0 = COPY %2(<16 x s16>)
-# ALL-NEXT: RET 0, implicit %ymm0
+# ALL-NEXT: $ymm0 = COPY %2(<16 x s16>)
+# ALL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<16 x s16>) = COPY %ymm0
- %1(<16 x s16>) = COPY %ymm1
+ %0(<16 x s16>) = COPY $ymm0
+ %1(<16 x s16>) = COPY $ymm1
%2(<16 x s16>) = G_MUL %0, %1
- %ymm0 = COPY %2(<16 x s16>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<16 x s16>)
+ RET 0, implicit $ymm0
...
---
@@ -63,20 +63,20 @@
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<8 x s32>) = COPY %ymm0
-# ALL-NEXT: %1:_(<8 x s32>) = COPY %ymm1
+# ALL: %0:_(<8 x s32>) = COPY $ymm0
+# ALL-NEXT: %1:_(<8 x s32>) = COPY $ymm1
# ALL-NEXT: %2:_(<8 x s32>) = G_MUL %0, %1
-# ALL-NEXT: %ymm0 = COPY %2(<8 x s32>)
-# ALL-NEXT: RET 0, implicit %ymm0
+# ALL-NEXT: $ymm0 = COPY %2(<8 x s32>)
+# ALL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<8 x s32>) = COPY %ymm0
- %1(<8 x s32>) = COPY %ymm1
+ %0(<8 x s32>) = COPY $ymm0
+ %1(<8 x s32>) = COPY $ymm1
%2(<8 x s32>) = G_MUL %0, %1
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -93,19 +93,19 @@
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<4 x s64>) = COPY %ymm0
-# ALL-NEXT: %1:_(<4 x s64>) = COPY %ymm1
+# ALL: %0:_(<4 x s64>) = COPY $ymm0
+# ALL-NEXT: %1:_(<4 x s64>) = COPY $ymm1
# ALL-NEXT: %2:_(<4 x s64>) = G_MUL %0, %1
-# ALL-NEXT: %ymm0 = COPY %2(<4 x s64>)
-# ALL-NEXT: RET 0, implicit %ymm0
+# ALL-NEXT: $ymm0 = COPY %2(<4 x s64>)
+# ALL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<4 x s64>) = COPY %ymm0
- %1(<4 x s64>) = COPY %ymm1
+ %0(<4 x s64>) = COPY $ymm0
+ %1(<4 x s64>) = COPY $ymm1
%2(<4 x s64>) = G_MUL %0, %1
- %ymm0 = COPY %2(<4 x s64>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<4 x s64>)
+ RET 0, implicit $ymm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir
index 79d65f2..14c7e48 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir
@@ -35,20 +35,20 @@
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<32 x s16>) = COPY %zmm0
-# ALL-NEXT: %1:_(<32 x s16>) = COPY %zmm1
+# ALL: %0:_(<32 x s16>) = COPY $zmm0
+# ALL-NEXT: %1:_(<32 x s16>) = COPY $zmm1
# ALL-NEXT: %2:_(<32 x s16>) = G_MUL %0, %1
-# ALL-NEXT: %zmm0 = COPY %2(<32 x s16>)
-# ALL-NEXT: RET 0, implicit %zmm0
+# ALL-NEXT: $zmm0 = COPY %2(<32 x s16>)
+# ALL-NEXT: RET 0, implicit $zmm0
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
- %0(<32 x s16>) = COPY %zmm0
- %1(<32 x s16>) = COPY %zmm1
+ %0(<32 x s16>) = COPY $zmm0
+ %1(<32 x s16>) = COPY $zmm1
%2(<32 x s16>) = G_MUL %0, %1
- %zmm0 = COPY %2(<32 x s16>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<32 x s16>)
+ RET 0, implicit $zmm0
...
---
@@ -65,20 +65,20 @@
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<16 x s32>) = COPY %zmm0
-# ALL-NEXT: %1:_(<16 x s32>) = COPY %zmm1
+# ALL: %0:_(<16 x s32>) = COPY $zmm0
+# ALL-NEXT: %1:_(<16 x s32>) = COPY $zmm1
# ALL-NEXT: %2:_(<16 x s32>) = G_MUL %0, %1
-# ALL-NEXT: %zmm0 = COPY %2(<16 x s32>)
-# ALL-NEXT: RET 0, implicit %zmm0
+# ALL-NEXT: $zmm0 = COPY %2(<16 x s32>)
+# ALL-NEXT: RET 0, implicit $zmm0
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
- %0(<16 x s32>) = COPY %zmm0
- %1(<16 x s32>) = COPY %zmm1
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<16 x s32>) = COPY $zmm1
%2(<16 x s32>) = G_MUL %0, %1
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $zmm0
...
---
@@ -95,19 +95,19 @@
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<8 x s64>) = COPY %zmm0
-# ALL-NEXT: %1:_(<8 x s64>) = COPY %zmm1
+# ALL: %0:_(<8 x s64>) = COPY $zmm0
+# ALL-NEXT: %1:_(<8 x s64>) = COPY $zmm1
# ALL-NEXT: %2:_(<8 x s64>) = G_MUL %0, %1
-# ALL-NEXT: %zmm0 = COPY %2(<8 x s64>)
-# ALL-NEXT: RET 0, implicit %zmm0
+# ALL-NEXT: $zmm0 = COPY %2(<8 x s64>)
+# ALL-NEXT: RET 0, implicit $zmm0
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
- %0(<8 x s64>) = COPY %zmm0
- %1(<8 x s64>) = COPY %zmm1
+ %0(<8 x s64>) = COPY $zmm0
+ %1(<8 x s64>) = COPY $zmm1
%2(<8 x s64>) = G_MUL %0, %1
- %zmm0 = COPY %2(<8 x s64>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<8 x s64>)
+ RET 0, implicit $zmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir
index df6a3fd..85092b2 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir
@@ -41,7 +41,7 @@
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_or_i1
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[OR:%[0-9]+]]:_(s8) = G_OR [[TRUNC]], [[TRUNC1]]
@@ -51,7 +51,7 @@
; CHECK: [[AND:%[0-9]+]]:_(s8) = G_AND [[COPY1]], [[C]]
; CHECK: G_STORE [[AND]](s8), [[DEF]](p0) :: (store 1)
; CHECK: RET 0
- %0(s32) = COPY %edx
+ %0(s32) = COPY $edx
%1(s1) = G_TRUNC %0(s32)
%2(s1) = G_OR %1, %1
%3:_(p0) = G_IMPLICIT_DEF
@@ -75,12 +75,12 @@
; CHECK-LABEL: name: test_or_i8
; CHECK: [[DEF:%[0-9]+]]:_(s8) = IMPLICIT_DEF
; CHECK: [[OR:%[0-9]+]]:_(s8) = G_OR [[DEF]], [[DEF]]
- ; CHECK: %al = COPY [[OR]](s8)
- ; CHECK: RET 0, implicit %al
+ ; CHECK: $al = COPY [[OR]](s8)
+ ; CHECK: RET 0, implicit $al
%0(s8) = IMPLICIT_DEF
%1(s8) = G_OR %0, %0
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -100,12 +100,12 @@
; CHECK-LABEL: name: test_or_i16
; CHECK: [[DEF:%[0-9]+]]:_(s16) = IMPLICIT_DEF
; CHECK: [[OR:%[0-9]+]]:_(s16) = G_OR [[DEF]], [[DEF]]
- ; CHECK: %ax = COPY [[OR]](s16)
- ; CHECK: RET 0, implicit %ax
+ ; CHECK: $ax = COPY [[OR]](s16)
+ ; CHECK: RET 0, implicit $ax
%0(s16) = IMPLICIT_DEF
%1(s16) = G_OR %0, %0
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -125,12 +125,12 @@
; CHECK-LABEL: name: test_or_i32
; CHECK: [[DEF:%[0-9]+]]:_(s32) = IMPLICIT_DEF
; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[DEF]], [[DEF]]
- ; CHECK: %eax = COPY [[OR]](s32)
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: $eax = COPY [[OR]](s32)
+ ; CHECK: RET 0, implicit $eax
%0(s32) = IMPLICIT_DEF
%1(s32) = G_OR %0, %0
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -150,11 +150,11 @@
; CHECK-LABEL: name: test_or_i64
; CHECK: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[DEF]], [[DEF]]
- ; CHECK: %rax = COPY [[OR]](s64)
- ; CHECK: RET 0, implicit %rax
+ ; CHECK: $rax = COPY [[OR]](s64)
+ ; CHECK: RET 0, implicit $rax
%0(s64) = IMPLICIT_DEF
%1(s64) = G_OR %0, %0
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir
index 9297686..2710471 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir
@@ -144,10 +144,10 @@
; ALL-LABEL: name: test_i1
; ALL: bb.0.entry:
; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
- ; ALL: liveins: %edi, %edx, %esi
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
- ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
+ ; ALL: liveins: $edi, $edx, $esi
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
+ ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; ALL: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
@@ -158,16 +158,16 @@
; ALL: bb.2.cond.end:
; ALL: [[PHI:%[0-9]+]]:_(s8) = G_PHI [[TRUNC1]](s8), %bb.1, [[TRUNC]](s8), %bb.0
; ALL: [[COPY3:%[0-9]+]]:_(s8) = COPY [[PHI]](s8)
- ; ALL: %al = COPY [[COPY3]](s8)
- ; ALL: RET 0, implicit %al
+ ; ALL: $al = COPY [[COPY3]](s8)
+ ; ALL: RET 0, implicit $al
bb.1.entry:
successors: %bb.3(0x40000000), %bb.2(0x40000000)
- liveins: %edi, %edx, %esi
+ liveins: $edi, $edx, $esi
- %0:_(s32) = COPY %edi
- %3:_(s32) = COPY %esi
+ %0:_(s32) = COPY $edi
+ %3:_(s32) = COPY $esi
%1:_(s1) = G_TRUNC %3(s32)
- %4:_(s32) = COPY %edx
+ %4:_(s32) = COPY $edx
%2:_(s1) = G_TRUNC %4(s32)
%5:_(s32) = G_CONSTANT i32 0
%6:_(s1) = G_ICMP intpred(sgt), %0(s32), %5
@@ -180,8 +180,8 @@
bb.3.cond.end:
%7:_(s1) = G_PHI %2(s1), %bb.2, %1(s1), %bb.1
%8:_(s8) = G_ANYEXT %7(s1)
- %al = COPY %8(s8)
- RET 0, implicit %al
+ $al = COPY %8(s8)
+ RET 0, implicit $al
...
---
@@ -210,11 +210,11 @@
; ALL-LABEL: name: test_i8
; ALL: bb.0.entry:
; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
- ; ALL: liveins: %edi, %edx, %esi
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+ ; ALL: liveins: $edi, $edx, $esi
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; ALL: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
+ ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
; ALL: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
@@ -223,16 +223,16 @@
; ALL: successors: %bb.2(0x80000000)
; ALL: bb.2.cond.end:
; ALL: [[PHI:%[0-9]+]]:_(s8) = G_PHI [[TRUNC1]](s8), %bb.1, [[TRUNC]](s8), %bb.0
- ; ALL: %al = COPY [[PHI]](s8)
- ; ALL: RET 0, implicit %al
+ ; ALL: $al = COPY [[PHI]](s8)
+ ; ALL: RET 0, implicit $al
bb.1.entry:
successors: %bb.3(0x40000000), %bb.2(0x40000000)
- liveins: %edi, %edx, %esi
+ liveins: $edi, $edx, $esi
- %0:_(s32) = COPY %edi
- %3:_(s32) = COPY %esi
+ %0:_(s32) = COPY $edi
+ %3:_(s32) = COPY $esi
%1:_(s8) = G_TRUNC %3(s32)
- %4:_(s32) = COPY %edx
+ %4:_(s32) = COPY $edx
%2:_(s8) = G_TRUNC %4(s32)
%5:_(s32) = G_CONSTANT i32 0
%6:_(s1) = G_ICMP intpred(sgt), %0(s32), %5
@@ -244,8 +244,8 @@
bb.3.cond.end:
%7:_(s8) = G_PHI %2(s8), %bb.2, %1(s8), %bb.1
- %al = COPY %7(s8)
- RET 0, implicit %al
+ $al = COPY %7(s8)
+ RET 0, implicit $al
...
---
@@ -274,11 +274,11 @@
; ALL-LABEL: name: test_i16
; ALL: bb.0.entry:
; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
- ; ALL: liveins: %edi, %edx, %esi
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+ ; ALL: liveins: $edi, $edx, $esi
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; ALL: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
+ ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
; ALL: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
@@ -287,16 +287,16 @@
; ALL: successors: %bb.2(0x80000000)
; ALL: bb.2.cond.end:
; ALL: [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC1]](s16), %bb.1, [[TRUNC]](s16), %bb.0
- ; ALL: %ax = COPY [[PHI]](s16)
- ; ALL: RET 0, implicit %ax
+ ; ALL: $ax = COPY [[PHI]](s16)
+ ; ALL: RET 0, implicit $ax
bb.1.entry:
successors: %bb.3(0x40000000), %bb.2(0x40000000)
- liveins: %edi, %edx, %esi
+ liveins: $edi, $edx, $esi
- %0:_(s32) = COPY %edi
- %3:_(s32) = COPY %esi
+ %0:_(s32) = COPY $edi
+ %3:_(s32) = COPY $esi
%1:_(s16) = G_TRUNC %3(s32)
- %4:_(s32) = COPY %edx
+ %4:_(s32) = COPY $edx
%2:_(s16) = G_TRUNC %4(s32)
%5:_(s32) = G_CONSTANT i32 0
%6:_(s1) = G_ICMP intpred(sgt), %0(s32), %5
@@ -308,8 +308,8 @@
bb.3.cond.end:
%7:_(s16) = G_PHI %2(s16), %bb.2, %1(s16), %bb.1
- %ax = COPY %7(s16)
- RET 0, implicit %ax
+ $ax = COPY %7(s16)
+ RET 0, implicit $ax
...
---
@@ -336,10 +336,10 @@
; ALL-LABEL: name: test_i32
; ALL: bb.0.entry:
; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; ALL: liveins: %edi, %edx, %esi
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
- ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
+ ; ALL: liveins: $edi, $edx, $esi
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
+ ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; ALL: G_BRCOND [[ICMP]](s1), %bb.1
@@ -351,15 +351,15 @@
; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
; ALL: [[PHI:%[0-9]+]]:_(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
- ; ALL: %eax = COPY [[PHI]](s32)
- ; ALL: RET 0, implicit %eax
+ ; ALL: $eax = COPY [[PHI]](s32)
+ ; ALL: RET 0, implicit $eax
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi, %edx, %esi
+ liveins: $edi, $edx, $esi
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
- %2(s32) = COPY %edx
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
+ %2(s32) = COPY $edx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.2
@@ -376,8 +376,8 @@
bb.4.cond.end:
%5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
- %eax = COPY %5(s32)
- RET 0, implicit %eax
+ $eax = COPY %5(s32)
+ RET 0, implicit $eax
...
---
@@ -404,10 +404,10 @@
; ALL-LABEL: name: test_i64
; ALL: bb.0.entry:
; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; ALL: liveins: %edi, %rdx, %rsi
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY %rsi
- ; ALL: [[COPY2:%[0-9]+]]:_(s64) = COPY %rdx
+ ; ALL: liveins: $edi, $rdx, $rsi
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi
+ ; ALL: [[COPY2:%[0-9]+]]:_(s64) = COPY $rdx
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; ALL: G_BRCOND [[ICMP]](s1), %bb.1
@@ -419,15 +419,15 @@
; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
; ALL: [[PHI:%[0-9]+]]:_(s64) = G_PHI [[COPY1]](s64), %bb.1, [[COPY2]](s64), %bb.2
- ; ALL: %rax = COPY [[PHI]](s64)
- ; ALL: RET 0, implicit %rax
+ ; ALL: $rax = COPY [[PHI]](s64)
+ ; ALL: RET 0, implicit $rax
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi, %rdx, %rsi
+ liveins: $edi, $rdx, $rsi
- %0(s32) = COPY %edi
- %1(s64) = COPY %rsi
- %2(s64) = COPY %rdx
+ %0(s32) = COPY $edi
+ %1(s64) = COPY $rsi
+ %2(s64) = COPY $rdx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.2
@@ -444,8 +444,8 @@
bb.4.cond.end:
%5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
- %rax = COPY %5(s64)
- RET 0, implicit %rax
+ $rax = COPY %5(s64)
+ RET 0, implicit $rax
...
---
@@ -475,10 +475,10 @@
; ALL-LABEL: name: test_float
; ALL: bb.0.{{[a-zA-Z0-9]+}}:
; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; ALL: liveins: %edi, %xmm0, %xmm1
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm0
- ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %xmm1
+ ; ALL: liveins: $edi, $xmm0, $xmm1
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm0
+ ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $xmm1
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; ALL: G_BRCOND [[ICMP]](s1), %bb.1
@@ -490,15 +490,15 @@
; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
; ALL: [[PHI:%[0-9]+]]:_(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
- ; ALL: %xmm0 = COPY [[PHI]](s32)
- ; ALL: RET 0, implicit %xmm0
+ ; ALL: $xmm0 = COPY [[PHI]](s32)
+ ; ALL: RET 0, implicit $xmm0
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi, %xmm0, %xmm1
+ liveins: $edi, $xmm0, $xmm1
- %0(s32) = COPY %edi
- %1(s32) = COPY %xmm0
- %2(s32) = COPY %xmm1
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $xmm0
+ %2(s32) = COPY $xmm1
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.2
@@ -515,8 +515,8 @@
bb.4.cond.end:
%5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
- %xmm0 = COPY %5(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %5(s32)
+ RET 0, implicit $xmm0
...
---
@@ -543,10 +543,10 @@
; ALL-LABEL: name: test_double
; ALL: bb.0.{{[a-zA-Z0-9]+}}:
; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; ALL: liveins: %edi, %xmm0, %xmm1
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm0
- ; ALL: [[COPY2:%[0-9]+]]:_(s64) = COPY %xmm1
+ ; ALL: liveins: $edi, $xmm0, $xmm1
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm0
+ ; ALL: [[COPY2:%[0-9]+]]:_(s64) = COPY $xmm1
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; ALL: G_BRCOND [[ICMP]](s1), %bb.1
@@ -558,15 +558,15 @@
; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
; ALL: [[PHI:%[0-9]+]]:_(s64) = G_PHI [[COPY1]](s64), %bb.1, [[COPY2]](s64), %bb.2
- ; ALL: %xmm0 = COPY [[PHI]](s64)
- ; ALL: RET 0, implicit %xmm0
+ ; ALL: $xmm0 = COPY [[PHI]](s64)
+ ; ALL: RET 0, implicit $xmm0
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi, %xmm0, %xmm1
+ liveins: $edi, $xmm0, $xmm1
- %0(s32) = COPY %edi
- %1(s64) = COPY %xmm0
- %2(s64) = COPY %xmm1
+ %0(s32) = COPY $edi
+ %1(s64) = COPY $xmm0
+ %2(s64) = COPY $xmm1
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.2
@@ -583,7 +583,7 @@
bb.4.cond.end:
%5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
- %xmm0 = COPY %5(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %5(s64)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir
index c4314dd..e682f24 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir
@@ -33,7 +33,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_sub_v16i8
; ALL: [[DEF:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
@@ -43,7 +43,7 @@
%0(<16 x s8>) = IMPLICIT_DEF
%1(<16 x s8>) = IMPLICIT_DEF
%2(<16 x s8>) = G_SUB %0, %1
- %xmm0 = COPY %2
+ $xmm0 = COPY %2
RET 0
...
@@ -58,7 +58,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_sub_v8i16
; ALL: [[DEF:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
@@ -68,7 +68,7 @@
%0(<8 x s16>) = IMPLICIT_DEF
%1(<8 x s16>) = IMPLICIT_DEF
%2(<8 x s16>) = G_SUB %0, %1
- %xmm0 = COPY %2
+ $xmm0 = COPY %2
RET 0
...
@@ -83,7 +83,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_sub_v4i32
; ALL: [[DEF:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
@@ -93,7 +93,7 @@
%0(<4 x s32>) = IMPLICIT_DEF
%1(<4 x s32>) = IMPLICIT_DEF
%2(<4 x s32>) = G_SUB %0, %1
- %xmm0 = COPY %2
+ $xmm0 = COPY %2
RET 0
...
@@ -108,7 +108,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_sub_v2i64
; ALL: [[DEF:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
@@ -118,7 +118,7 @@
%0(<2 x s64>) = IMPLICIT_DEF
%1(<2 x s64>) = IMPLICIT_DEF
%2(<2 x s64>) = G_SUB %0, %1
- %xmm0 = COPY %2
+ $xmm0 = COPY %2
RET 0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir
index 7f4a6d7..5e35f1e 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir
@@ -34,7 +34,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_sub_v32i8
; ALL: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
@@ -44,7 +44,7 @@
%0(<32 x s8>) = IMPLICIT_DEF
%1(<32 x s8>) = IMPLICIT_DEF
%2(<32 x s8>) = G_SUB %0, %1
- %ymm0 = COPY %2
+ $ymm0 = COPY %2
RET 0
...
@@ -59,7 +59,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_sub_v16i16
; ALL: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
@@ -69,7 +69,7 @@
%0(<16 x s16>) = IMPLICIT_DEF
%1(<16 x s16>) = IMPLICIT_DEF
%2(<16 x s16>) = G_SUB %0, %1
- %ymm0 = COPY %2
+ $ymm0 = COPY %2
RET 0
...
@@ -84,7 +84,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_sub_v8i32
; ALL: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
@@ -94,7 +94,7 @@
%0(<8 x s32>) = IMPLICIT_DEF
%1(<8 x s32>) = IMPLICIT_DEF
%2(<8 x s32>) = G_SUB %0, %1
- %ymm0 = COPY %2
+ $ymm0 = COPY %2
RET 0
...
@@ -109,7 +109,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_sub_v4i64
; ALL: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
@@ -119,7 +119,7 @@
%0(<4 x s64>) = IMPLICIT_DEF
%1(<4 x s64>) = IMPLICIT_DEF
%2(<4 x s64>) = G_SUB %0, %1
- %ymm0 = COPY %2
+ $ymm0 = COPY %2
RET 0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir
index 2271d36..271ea3a 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir
@@ -34,7 +34,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_sub_v64i8
; ALL: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
@@ -44,7 +44,7 @@
%0(<64 x s8>) = IMPLICIT_DEF
%1(<64 x s8>) = IMPLICIT_DEF
%2(<64 x s8>) = G_SUB %0, %1
- %zmm0 = COPY %2
+ $zmm0 = COPY %2
RET 0
...
@@ -59,7 +59,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_sub_v32i16
; ALL: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
@@ -69,7 +69,7 @@
%0(<32 x s16>) = IMPLICIT_DEF
%1(<32 x s16>) = IMPLICIT_DEF
%2(<32 x s16>) = G_SUB %0, %1
- %zmm0 = COPY %2
+ $zmm0 = COPY %2
RET 0
...
@@ -84,7 +84,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_sub_v16i32
; ALL: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
@@ -94,7 +94,7 @@
%0(<16 x s32>) = IMPLICIT_DEF
%1(<16 x s32>) = IMPLICIT_DEF
%2(<16 x s32>) = G_SUB %0, %1
- %zmm0 = COPY %2
+ $zmm0 = COPY %2
RET 0
...
@@ -109,7 +109,7 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_sub_v8i64
; ALL: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
@@ -119,7 +119,7 @@
%0(<8 x s64>) = IMPLICIT_DEF
%1(<8 x s64>) = IMPLICIT_DEF
%2(<8 x s64>) = G_SUB %0, %1
- %zmm0 = COPY %2
+ $zmm0 = COPY %2
RET 0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-sub.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-sub.mir
index 406967f..5ebe0e7 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-sub.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-sub.mir
@@ -24,7 +24,7 @@
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_sub_i1
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[SUB:%[0-9]+]]:_(s8) = G_SUB [[TRUNC]], [[TRUNC1]]
@@ -34,7 +34,7 @@
; CHECK: [[AND:%[0-9]+]]:_(s8) = G_AND [[COPY1]], [[C]]
; CHECK: G_STORE [[AND]](s8), [[DEF]](p0) :: (store 1)
; CHECK: RET 0
- %0(s32) = COPY %edx
+ %0(s32) = COPY $edx
%1(s1) = G_TRUNC %0(s32)
%2(s1) = G_SUB %1, %1
%3:_(p0) = G_IMPLICIT_DEF
@@ -53,19 +53,19 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_sub_i32
- ; CHECK: liveins: %edi, %esi
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+ ; CHECK: liveins: $edi, $esi
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK: %eax = COPY [[SUB]](s32)
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: $eax = COPY [[SUB]](s32)
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_SUB %0, %1
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir
index 3213ce1..5f397df 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir
@@ -41,12 +41,12 @@
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_xor_i1
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[XOR:%[0-9]+]]:_(s8) = G_XOR [[TRUNC]], [[TRUNC1]]
; CHECK: RET 0
- %0(s32) = COPY %edx
+ %0(s32) = COPY $edx
%1(s1) = G_TRUNC %0(s32)
%2(s1) = G_XOR %1, %1
%3:_(p0) = G_IMPLICIT_DEF
@@ -70,12 +70,12 @@
; CHECK-LABEL: name: test_xor_i8
; CHECK: [[DEF:%[0-9]+]]:_(s8) = IMPLICIT_DEF
; CHECK: [[XOR:%[0-9]+]]:_(s8) = G_XOR [[DEF]], [[DEF]]
- ; CHECK: %al = COPY [[XOR]](s8)
- ; CHECK: RET 0, implicit %al
+ ; CHECK: $al = COPY [[XOR]](s8)
+ ; CHECK: RET 0, implicit $al
%0(s8) = IMPLICIT_DEF
%1(s8) = G_XOR %0, %0
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -95,12 +95,12 @@
; CHECK-LABEL: name: test_xor_i16
; CHECK: [[DEF:%[0-9]+]]:_(s16) = IMPLICIT_DEF
; CHECK: [[XOR:%[0-9]+]]:_(s16) = G_XOR [[DEF]], [[DEF]]
- ; CHECK: %ax = COPY [[XOR]](s16)
- ; CHECK: RET 0, implicit %ax
+ ; CHECK: $ax = COPY [[XOR]](s16)
+ ; CHECK: RET 0, implicit $ax
%0(s16) = IMPLICIT_DEF
%1(s16) = G_XOR %0, %0
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -120,12 +120,12 @@
; CHECK-LABEL: name: test_xor_i32
; CHECK: [[DEF:%[0-9]+]]:_(s32) = IMPLICIT_DEF
; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[DEF]], [[DEF]]
- ; CHECK: %eax = COPY [[XOR]](s32)
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: $eax = COPY [[XOR]](s32)
+ ; CHECK: RET 0, implicit $eax
%0(s32) = IMPLICIT_DEF
%1(s32) = G_XOR %0, %0
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -145,11 +145,11 @@
; CHECK-LABEL: name: test_xor_i64
; CHECK: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; CHECK: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[DEF]], [[DEF]]
- ; CHECK: %rax = COPY [[XOR]](s64)
- ; CHECK: RET 0, implicit %rax
+ ; CHECK: $rax = COPY [[XOR]](s64)
+ ; CHECK: RET 0, implicit $rax
%0(s64) = IMPLICIT_DEF
%1(s64) = G_XOR %0, %0
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
index ca238b2..98a6693 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
@@ -107,12 +107,12 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
- %0(p0) = COPY %rdi
+ %0(p0) = COPY $rdi
%1(<8 x s32>) = G_LOAD %0(p0) :: (load 32 from %ir.p1, align 1)
- %ymm0 = COPY %1(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -129,10 +129,10 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %ymm0
+ liveins: $rdi, $ymm0
- %0(<8 x s32>) = COPY %ymm0
- %1(p0) = COPY %rdi
+ %0(<8 x s32>) = COPY $ymm0
+ %1(p0) = COPY $rdi
G_STORE %0(<8 x s32>), %1(p0) :: (store 32 into %ir.p1, align 1)
RET 0
diff --git a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
index c94ecc8..db34aef 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
@@ -100,12 +100,12 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
- %0(p0) = COPY %rdi
+ %0(p0) = COPY $rdi
%1(<16 x s32>) = G_LOAD %0(p0) :: (load 64 from %ir.p1, align 1)
- %zmm0 = COPY %1(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %1(<16 x s32>)
+ RET 0, implicit $zmm0
...
---
@@ -122,10 +122,10 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %zmm0
+ liveins: $rdi, $zmm0
- %0(<16 x s32>) = COPY %zmm0
- %1(p0) = COPY %rdi
+ %0(<16 x s32>) = COPY $zmm0
+ %1(p0) = COPY $rdi
G_STORE %0(<16 x s32>), %1(p0) :: (store 64 into %ir.p1, align 1)
RET 0
diff --git a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
index 7af4d18..971a784 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
@@ -256,27 +256,27 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; FAST-LABEL: name: test_add_i8
- ; FAST: liveins: %edi, %esi
- ; FAST: [[COPY:%[0-9]+]]:gpr(s8) = COPY %dil
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s8) = COPY %sil
+ ; FAST: liveins: $edi, $esi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s8) = COPY $dil
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s8) = COPY $sil
; FAST: [[ADD:%[0-9]+]]:gpr(s8) = G_ADD [[COPY]], [[COPY1]]
- ; FAST: %al = COPY [[ADD]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[ADD]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_add_i8
- ; GREEDY: liveins: %edi, %esi
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s8) = COPY %dil
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s8) = COPY %sil
+ ; GREEDY: liveins: $edi, $esi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s8) = COPY $dil
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s8) = COPY $sil
; GREEDY: [[ADD:%[0-9]+]]:gpr(s8) = G_ADD [[COPY]], [[COPY1]]
- ; GREEDY: %al = COPY [[ADD]](s8)
- ; GREEDY: RET 0, implicit %al
- %0(s8) = COPY %dil
- %1(s8) = COPY %sil
+ ; GREEDY: $al = COPY [[ADD]](s8)
+ ; GREEDY: RET 0, implicit $al
+ %0(s8) = COPY $dil
+ %1(s8) = COPY $sil
%2(s8) = G_ADD %0, %1
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -292,27 +292,27 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; FAST-LABEL: name: test_add_i16
- ; FAST: liveins: %edi, %esi
- ; FAST: [[COPY:%[0-9]+]]:gpr(s16) = COPY %di
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s16) = COPY %si
+ ; FAST: liveins: $edi, $esi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s16) = COPY $di
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s16) = COPY $si
; FAST: [[ADD:%[0-9]+]]:gpr(s16) = G_ADD [[COPY]], [[COPY1]]
- ; FAST: %ax = COPY [[ADD]](s16)
- ; FAST: RET 0, implicit %ax
+ ; FAST: $ax = COPY [[ADD]](s16)
+ ; FAST: RET 0, implicit $ax
; GREEDY-LABEL: name: test_add_i16
- ; GREEDY: liveins: %edi, %esi
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s16) = COPY %di
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s16) = COPY %si
+ ; GREEDY: liveins: $edi, $esi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s16) = COPY $di
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s16) = COPY $si
; GREEDY: [[ADD:%[0-9]+]]:gpr(s16) = G_ADD [[COPY]], [[COPY1]]
- ; GREEDY: %ax = COPY [[ADD]](s16)
- ; GREEDY: RET 0, implicit %ax
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ ; GREEDY: $ax = COPY [[ADD]](s16)
+ ; GREEDY: RET 0, implicit $ax
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s16) = G_ADD %0, %1
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -328,27 +328,27 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; FAST-LABEL: name: test_add_i32
- ; FAST: liveins: %edi, %esi
- ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+ ; FAST: liveins: $edi, $esi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; FAST: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[COPY]], [[COPY1]]
- ; FAST: %eax = COPY [[ADD]](s32)
- ; FAST: RET 0, implicit %eax
+ ; FAST: $eax = COPY [[ADD]](s32)
+ ; FAST: RET 0, implicit $eax
; GREEDY-LABEL: name: test_add_i32
- ; GREEDY: liveins: %edi, %esi
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+ ; GREEDY: liveins: $edi, $esi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; GREEDY: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[COPY]], [[COPY1]]
- ; GREEDY: %eax = COPY [[ADD]](s32)
- ; GREEDY: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; GREEDY: $eax = COPY [[ADD]](s32)
+ ; GREEDY: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_ADD %0, %1
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -364,27 +364,27 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; FAST-LABEL: name: test_add_i64
- ; FAST: liveins: %rdi, %rsi
- ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s64) = COPY %rsi
+ ; FAST: liveins: $rdi, $rsi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $rsi
; FAST: [[ADD:%[0-9]+]]:gpr(s64) = G_ADD [[COPY]], [[COPY1]]
- ; FAST: %rax = COPY [[ADD]](s64)
- ; FAST: RET 0, implicit %rax
+ ; FAST: $rax = COPY [[ADD]](s64)
+ ; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_add_i64
- ; GREEDY: liveins: %rdi, %rsi
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s64) = COPY %rsi
+ ; GREEDY: liveins: $rdi, $rsi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $rsi
; GREEDY: [[ADD:%[0-9]+]]:gpr(s64) = G_ADD [[COPY]], [[COPY1]]
- ; GREEDY: %rax = COPY [[ADD]](s64)
- ; GREEDY: RET 0, implicit %rax
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ ; GREEDY: $rax = COPY [[ADD]](s64)
+ ; GREEDY: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s64) = G_ADD %0, %1
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
---
@@ -449,27 +449,27 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; FAST-LABEL: name: test_add_float
- ; FAST: liveins: %xmm0, %xmm1
- ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
- ; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY %xmm1
+ ; FAST: liveins: $xmm0, $xmm1
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
+ ; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY $xmm1
; FAST: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[COPY]], [[COPY1]]
- ; FAST: %xmm0 = COPY [[FADD]](s32)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[FADD]](s32)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_add_float
- ; GREEDY: liveins: %xmm0, %xmm1
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
- ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY %xmm1
+ ; GREEDY: liveins: $xmm0, $xmm1
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
+ ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY $xmm1
; GREEDY: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[COPY]], [[COPY1]]
- ; GREEDY: %xmm0 = COPY [[FADD]](s32)
- ; GREEDY: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; GREEDY: $xmm0 = COPY [[FADD]](s32)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FADD %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -485,27 +485,27 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; FAST-LABEL: name: test_add_double
- ; FAST: liveins: %xmm0, %xmm1
- ; FAST: [[COPY:%[0-9]+]]:vecr(s64) = COPY %xmm0
- ; FAST: [[COPY1:%[0-9]+]]:vecr(s64) = COPY %xmm1
+ ; FAST: liveins: $xmm0, $xmm1
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s64) = COPY $xmm0
+ ; FAST: [[COPY1:%[0-9]+]]:vecr(s64) = COPY $xmm1
; FAST: [[FADD:%[0-9]+]]:vecr(s64) = G_FADD [[COPY]], [[COPY1]]
- ; FAST: %xmm0 = COPY [[FADD]](s64)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[FADD]](s64)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_add_double
- ; GREEDY: liveins: %xmm0, %xmm1
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s64) = COPY %xmm0
- ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s64) = COPY %xmm1
+ ; GREEDY: liveins: $xmm0, $xmm1
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s64) = COPY $xmm0
+ ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s64) = COPY $xmm1
; GREEDY: [[FADD:%[0-9]+]]:vecr(s64) = G_FADD [[COPY]], [[COPY1]]
- ; GREEDY: %xmm0 = COPY [[FADD]](s64)
- ; GREEDY: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; GREEDY: $xmm0 = COPY [[FADD]](s64)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FADD %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
---
@@ -650,27 +650,27 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; FAST-LABEL: name: test_add_v4i32
- ; FAST: liveins: %xmm0, %xmm1
- ; FAST: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm0
- ; FAST: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm1
+ ; FAST: liveins: $xmm0, $xmm1
+ ; FAST: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm0
+ ; FAST: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm1
; FAST: [[ADD:%[0-9]+]]:vecr(<4 x s32>) = G_ADD [[COPY]], [[COPY1]]
- ; FAST: %xmm0 = COPY [[ADD]](<4 x s32>)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[ADD]](<4 x s32>)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_add_v4i32
- ; GREEDY: liveins: %xmm0, %xmm1
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm0
- ; GREEDY: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm1
+ ; GREEDY: liveins: $xmm0, $xmm1
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm0
+ ; GREEDY: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm1
; GREEDY: [[ADD:%[0-9]+]]:vecr(<4 x s32>) = G_ADD [[COPY]], [[COPY1]]
- ; GREEDY: %xmm0 = COPY [[ADD]](<4 x s32>)
- ; GREEDY: RET 0, implicit %xmm0
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ ; GREEDY: $xmm0 = COPY [[ADD]](<4 x s32>)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_ADD %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -686,27 +686,27 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; FAST-LABEL: name: test_add_v4f32
- ; FAST: liveins: %xmm0, %xmm1
- ; FAST: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm0
- ; FAST: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm1
+ ; FAST: liveins: $xmm0, $xmm1
+ ; FAST: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm0
+ ; FAST: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm1
; FAST: [[FADD:%[0-9]+]]:vecr(<4 x s32>) = G_FADD [[COPY]], [[COPY1]]
- ; FAST: %xmm0 = COPY [[FADD]](<4 x s32>)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[FADD]](<4 x s32>)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_add_v4f32
- ; GREEDY: liveins: %xmm0, %xmm1
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm0
- ; GREEDY: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm1
+ ; GREEDY: liveins: $xmm0, $xmm1
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm0
+ ; GREEDY: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm1
; GREEDY: [[FADD:%[0-9]+]]:vecr(<4 x s32>) = G_FADD [[COPY]], [[COPY1]]
- ; GREEDY: %xmm0 = COPY [[FADD]](<4 x s32>)
- ; GREEDY: RET 0, implicit %xmm0
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ ; GREEDY: $xmm0 = COPY [[FADD]](<4 x s32>)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_FADD %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -720,22 +720,22 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; FAST-LABEL: name: test_load_i8
- ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[LOAD:%[0-9]+]]:gpr(s8) = G_LOAD [[COPY]](p0) :: (load 1 from %ir.p1)
- ; FAST: %al = COPY [[LOAD]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[LOAD]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_load_i8
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: [[LOAD:%[0-9]+]]:gpr(s8) = G_LOAD [[COPY]](p0) :: (load 1 from %ir.p1)
- ; GREEDY: %al = COPY [[LOAD]](s8)
- ; GREEDY: RET 0, implicit %al
- %0(p0) = COPY %rdi
+ ; GREEDY: $al = COPY [[LOAD]](s8)
+ ; GREEDY: RET 0, implicit $al
+ %0(p0) = COPY $rdi
%1(s8) = G_LOAD %0(p0) :: (load 1 from %ir.p1)
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -749,22 +749,22 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; FAST-LABEL: name: test_load_i16
- ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[LOAD:%[0-9]+]]:gpr(s16) = G_LOAD [[COPY]](p0) :: (load 2 from %ir.p1)
- ; FAST: %ax = COPY [[LOAD]](s16)
- ; FAST: RET 0, implicit %ax
+ ; FAST: $ax = COPY [[LOAD]](s16)
+ ; FAST: RET 0, implicit $ax
; GREEDY-LABEL: name: test_load_i16
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: [[LOAD:%[0-9]+]]:gpr(s16) = G_LOAD [[COPY]](p0) :: (load 2 from %ir.p1)
- ; GREEDY: %ax = COPY [[LOAD]](s16)
- ; GREEDY: RET 0, implicit %ax
- %0(p0) = COPY %rdi
+ ; GREEDY: $ax = COPY [[LOAD]](s16)
+ ; GREEDY: RET 0, implicit $ax
+ %0(p0) = COPY $rdi
%1(s16) = G_LOAD %0(p0) :: (load 2 from %ir.p1)
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -778,22 +778,22 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; FAST-LABEL: name: test_load_i32
- ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p1)
- ; FAST: %eax = COPY [[LOAD]](s32)
- ; FAST: RET 0, implicit %eax
+ ; FAST: $eax = COPY [[LOAD]](s32)
+ ; FAST: RET 0, implicit $eax
; GREEDY-LABEL: name: test_load_i32
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p1)
- ; GREEDY: %eax = COPY [[LOAD]](s32)
- ; GREEDY: RET 0, implicit %eax
- %0(p0) = COPY %rdi
+ ; GREEDY: $eax = COPY [[LOAD]](s32)
+ ; GREEDY: RET 0, implicit $eax
+ %0(p0) = COPY $rdi
%1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -808,22 +808,22 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; FAST-LABEL: name: test_load_i64
- ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[LOAD:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.p1)
- ; FAST: %rax = COPY [[LOAD]](s64)
- ; FAST: RET 0, implicit %rax
+ ; FAST: $rax = COPY [[LOAD]](s64)
+ ; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_load_i64
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: [[LOAD:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.p1)
- ; GREEDY: %rax = COPY [[LOAD]](s64)
- ; GREEDY: RET 0, implicit %rax
- %0(p0) = COPY %rdi
+ ; GREEDY: $rax = COPY [[LOAD]](s64)
+ ; GREEDY: RET 0, implicit $rax
+ %0(p0) = COPY $rdi
%1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -837,22 +837,22 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; FAST-LABEL: name: test_load_float
- ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p1)
- ; FAST: %xmm0 = COPY [[LOAD]](s32)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[LOAD]](s32)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_load_float
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p1)
- ; GREEDY: %xmm0 = COPY [[LOAD]](s32)
- ; GREEDY: RET 0, implicit %xmm0
- %0(p0) = COPY %rdi
+ ; GREEDY: $xmm0 = COPY [[LOAD]](s32)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(p0) = COPY $rdi
%1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
- %xmm0 = COPY %1(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s32)
+ RET 0, implicit $xmm0
...
---
@@ -866,22 +866,22 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; FAST-LABEL: name: test_load_double
- ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[LOAD:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.p1)
- ; FAST: %xmm0 = COPY [[LOAD]](s64)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[LOAD]](s64)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_load_double
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: [[LOAD:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.p1)
- ; GREEDY: %xmm0 = COPY [[LOAD]](s64)
- ; GREEDY: RET 0, implicit %xmm0
- %0(p0) = COPY %rdi
+ ; GREEDY: $xmm0 = COPY [[LOAD]](s64)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(p0) = COPY $rdi
%1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
- %xmm0 = COPY %1(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s64)
+ RET 0, implicit $xmm0
...
---
@@ -895,22 +895,22 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; FAST-LABEL: name: test_load_v4i32
- ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[LOAD:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.p1, align 1)
- ; FAST: %xmm0 = COPY [[LOAD]](<4 x s32>)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[LOAD]](<4 x s32>)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_load_v4i32
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: [[LOAD:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.p1, align 1)
- ; GREEDY: %xmm0 = COPY [[LOAD]](<4 x s32>)
- ; GREEDY: RET 0, implicit %xmm0
- %0(p0) = COPY %rdi
+ ; GREEDY: $xmm0 = COPY [[LOAD]](<4 x s32>)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(p0) = COPY $rdi
%1(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.p1, align 1)
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -924,25 +924,25 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %rsi
+ liveins: $edi, $rsi
; FAST-LABEL: name: test_store_i32
- ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rsi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
; FAST: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store 4 into %ir.p1)
- ; FAST: %rax = COPY [[COPY1]](p0)
- ; FAST: RET 0, implicit %rax
+ ; FAST: $rax = COPY [[COPY1]](p0)
+ ; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_store_i32
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rsi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
; GREEDY: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store 4 into %ir.p1)
- ; GREEDY: %rax = COPY [[COPY1]](p0)
- ; GREEDY: RET 0, implicit %rax
- %0(s32) = COPY %edi
- %1(p0) = COPY %rsi
+ ; GREEDY: $rax = COPY [[COPY1]](p0)
+ ; GREEDY: RET 0, implicit $rax
+ %0(s32) = COPY $edi
+ %1(p0) = COPY $rsi
G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -956,25 +956,25 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; FAST-LABEL: name: test_store_i64
- ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi
- ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rsi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
; FAST: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store 8 into %ir.p1)
- ; FAST: %rax = COPY [[COPY1]](p0)
- ; FAST: RET 0, implicit %rax
+ ; FAST: $rax = COPY [[COPY1]](p0)
+ ; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_store_i64
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rsi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
; GREEDY: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store 8 into %ir.p1)
- ; GREEDY: %rax = COPY [[COPY1]](p0)
- ; GREEDY: RET 0, implicit %rax
- %0(s64) = COPY %rdi
- %1(p0) = COPY %rsi
+ ; GREEDY: $rax = COPY [[COPY1]](p0)
+ ; GREEDY: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
+ %1(p0) = COPY $rsi
G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -991,29 +991,29 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
+ liveins: $rdi, $xmm0
; FAST-LABEL: name: test_store_float
- ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
- ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[COPY2:%[0-9]+]]:gpr(s32) = COPY [[COPY]](s32)
; FAST: G_STORE [[COPY2]](s32), [[COPY1]](p0) :: (store 4 into %ir.p1)
- ; FAST: %rax = COPY [[COPY1]](p0)
- ; FAST: RET 0, implicit %rax
+ ; FAST: $rax = COPY [[COPY1]](p0)
+ ; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_store_float
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store 4 into %ir.p1)
- ; GREEDY: %rax = COPY [[COPY1]](p0)
- ; GREEDY: RET 0, implicit %rax
- %0(s32) = COPY %xmm0
- %1(p0) = COPY %rdi
+ ; GREEDY: $rax = COPY [[COPY1]](p0)
+ ; GREEDY: RET 0, implicit $rax
+ %0(s32) = COPY $xmm0
+ %1(p0) = COPY $rdi
G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -1030,30 +1030,30 @@
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
+ liveins: $rdi, $xmm0
; FAST-LABEL: name: test_store_double
- ; FAST: [[COPY:%[0-9]+]]:vecr(s64) = COPY %xmm0
- ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s64) = COPY $xmm0
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64)
; FAST: G_STORE [[COPY2]](s64), [[COPY1]](p0) :: (store 8 into %ir.p1)
- ; FAST: %rax = COPY [[COPY1]](p0)
- ; FAST: RET 0, implicit %rax
+ ; FAST: $rax = COPY [[COPY1]](p0)
+ ; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_store_double
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s64) = COPY %xmm0
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s64) = COPY $xmm0
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store 8 into %ir.p1)
- ; GREEDY: %rax = COPY [[COPY1]](p0)
- ; GREEDY: RET 0, implicit %rax
- %0(s64) = COPY %xmm0
- %1(p0) = COPY %rdi
+ ; GREEDY: $rax = COPY [[COPY1]](p0)
+ ; GREEDY: RET 0, implicit $rax
+ %0(s64) = COPY $xmm0
+ %1(p0) = COPY $rdi
G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -1160,34 +1160,34 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; FAST-LABEL: name: test_icmp_eq_i8
- ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
; FAST: [[TRUNC:%[0-9]+]]:gpr(s8) = G_TRUNC [[COPY]](s32)
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; FAST: [[TRUNC1:%[0-9]+]]:gpr(s8) = G_TRUNC [[COPY1]](s32)
; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[TRUNC]](s8), [[TRUNC1]]
; FAST: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; FAST: %al = COPY [[ANYEXT]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[ANYEXT]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_icmp_eq_i8
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
; GREEDY: [[TRUNC:%[0-9]+]]:gpr(s8) = G_TRUNC [[COPY]](s32)
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; GREEDY: [[TRUNC1:%[0-9]+]]:gpr(s8) = G_TRUNC [[COPY1]](s32)
; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[TRUNC]](s8), [[TRUNC1]]
; GREEDY: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; GREEDY: %al = COPY [[ANYEXT]](s8)
- ; GREEDY: RET 0, implicit %al
- %2:_(s32) = COPY %edi
+ ; GREEDY: $al = COPY [[ANYEXT]](s8)
+ ; GREEDY: RET 0, implicit $al
+ %2:_(s32) = COPY $edi
%0:_(s8) = G_TRUNC %2(s32)
- %3:_(s32) = COPY %esi
+ %3:_(s32) = COPY $esi
%1:_(s8) = G_TRUNC %3(s32)
%4:_(s1) = G_ICMP intpred(eq), %0(s8), %1
%5:_(s8) = G_ANYEXT %4(s1)
- %al = COPY %5(s8)
- RET 0, implicit %al
+ $al = COPY %5(s8)
+ RET 0, implicit $al
...
---
@@ -1201,34 +1201,34 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; FAST-LABEL: name: test_icmp_eq_i16
- ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
; FAST: [[TRUNC:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY]](s32)
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; FAST: [[TRUNC1:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY1]](s32)
; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[TRUNC]](s16), [[TRUNC1]]
; FAST: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; FAST: %al = COPY [[ANYEXT]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[ANYEXT]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_icmp_eq_i16
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
; GREEDY: [[TRUNC:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY]](s32)
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; GREEDY: [[TRUNC1:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY1]](s32)
; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[TRUNC]](s16), [[TRUNC1]]
; GREEDY: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; GREEDY: %al = COPY [[ANYEXT]](s8)
- ; GREEDY: RET 0, implicit %al
- %2:_(s32) = COPY %edi
+ ; GREEDY: $al = COPY [[ANYEXT]](s8)
+ ; GREEDY: RET 0, implicit $al
+ %2:_(s32) = COPY $edi
%0:_(s16) = G_TRUNC %2(s32)
- %3:_(s32) = COPY %esi
+ %3:_(s32) = COPY $esi
%1:_(s16) = G_TRUNC %3(s32)
%4:_(s1) = G_ICMP intpred(eq), %0(s16), %1
%5:_(s8) = G_ANYEXT %4(s1)
- %al = COPY %5(s8)
- RET 0, implicit %al
+ $al = COPY %5(s8)
+ RET 0, implicit $al
...
---
@@ -1242,28 +1242,28 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; FAST-LABEL: name: test_icmp_eq_i32
- ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[COPY1]]
; FAST: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; FAST: %al = COPY [[ANYEXT]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[ANYEXT]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_icmp_eq_i32
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[COPY1]]
; GREEDY: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; GREEDY: %al = COPY [[ANYEXT]](s8)
- ; GREEDY: RET 0, implicit %al
- %0:_(s32) = COPY %edi
- %1:_(s32) = COPY %esi
+ ; GREEDY: $al = COPY [[ANYEXT]](s8)
+ ; GREEDY: RET 0, implicit $al
+ %0:_(s32) = COPY $edi
+ %1:_(s32) = COPY $esi
%2:_(s1) = G_ICMP intpred(eq), %0(s32), %1
%3:_(s8) = G_ANYEXT %2(s1)
- %al = COPY %3(s8)
- RET 0, implicit %al
+ $al = COPY %3(s8)
+ RET 0, implicit $al
...
---
@@ -1277,28 +1277,28 @@
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; FAST-LABEL: name: test_icmp_eq_i64
- ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s64) = COPY %rsi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $rsi
; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[COPY]](s64), [[COPY1]]
; FAST: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; FAST: %al = COPY [[ANYEXT]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[ANYEXT]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_icmp_eq_i64
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s64) = COPY %rsi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $rsi
; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[COPY]](s64), [[COPY1]]
; GREEDY: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; GREEDY: %al = COPY [[ANYEXT]](s8)
- ; GREEDY: RET 0, implicit %al
- %0:_(s64) = COPY %rdi
- %1:_(s64) = COPY %rsi
+ ; GREEDY: $al = COPY [[ANYEXT]](s8)
+ ; GREEDY: RET 0, implicit $al
+ %0:_(s64) = COPY $rdi
+ %1:_(s64) = COPY $rsi
%2:_(s1) = G_ICMP intpred(eq), %0(s64), %1
%3:_(s8) = G_ANYEXT %2(s1)
- %al = COPY %3(s8)
- RET 0, implicit %al
+ $al = COPY %3(s8)
+ RET 0, implicit $al
...
---
@@ -1318,17 +1318,17 @@
; FAST-LABEL: name: test_xor_i8
; FAST: [[DEF:%[0-9]+]]:gpr(s8) = IMPLICIT_DEF
; FAST: [[XOR:%[0-9]+]]:gpr(s8) = G_XOR [[DEF]], [[DEF]]
- ; FAST: %al = COPY [[XOR]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[XOR]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_xor_i8
; GREEDY: [[DEF:%[0-9]+]]:gpr(s8) = IMPLICIT_DEF
; GREEDY: [[XOR:%[0-9]+]]:gpr(s8) = G_XOR [[DEF]], [[DEF]]
- ; GREEDY: %al = COPY [[XOR]](s8)
- ; GREEDY: RET 0, implicit %al
+ ; GREEDY: $al = COPY [[XOR]](s8)
+ ; GREEDY: RET 0, implicit $al
%0(s8) = IMPLICIT_DEF
%1(s8) = G_XOR %0, %0
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -1348,17 +1348,17 @@
; FAST-LABEL: name: test_or_i16
; FAST: [[DEF:%[0-9]+]]:gpr(s16) = IMPLICIT_DEF
; FAST: [[OR:%[0-9]+]]:gpr(s16) = G_OR [[DEF]], [[DEF]]
- ; FAST: %ax = COPY [[OR]](s16)
- ; FAST: RET 0, implicit %ax
+ ; FAST: $ax = COPY [[OR]](s16)
+ ; FAST: RET 0, implicit $ax
; GREEDY-LABEL: name: test_or_i16
; GREEDY: [[DEF:%[0-9]+]]:gpr(s16) = IMPLICIT_DEF
; GREEDY: [[OR:%[0-9]+]]:gpr(s16) = G_OR [[DEF]], [[DEF]]
- ; GREEDY: %ax = COPY [[OR]](s16)
- ; GREEDY: RET 0, implicit %ax
+ ; GREEDY: $ax = COPY [[OR]](s16)
+ ; GREEDY: RET 0, implicit $ax
%0(s16) = IMPLICIT_DEF
%1(s16) = G_OR %0, %0
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -1378,17 +1378,17 @@
; FAST-LABEL: name: test_and_i32
; FAST: [[DEF:%[0-9]+]]:gpr(s32) = IMPLICIT_DEF
; FAST: [[AND:%[0-9]+]]:gpr(s32) = G_AND [[DEF]], [[DEF]]
- ; FAST: %eax = COPY [[AND]](s32)
- ; FAST: RET 0, implicit %eax
+ ; FAST: $eax = COPY [[AND]](s32)
+ ; FAST: RET 0, implicit $eax
; GREEDY-LABEL: name: test_and_i32
; GREEDY: [[DEF:%[0-9]+]]:gpr(s32) = IMPLICIT_DEF
; GREEDY: [[AND:%[0-9]+]]:gpr(s32) = G_AND [[DEF]], [[DEF]]
- ; GREEDY: %eax = COPY [[AND]](s32)
- ; GREEDY: RET 0, implicit %eax
+ ; GREEDY: $eax = COPY [[AND]](s32)
+ ; GREEDY: RET 0, implicit $eax
%0(s32) = IMPLICIT_DEF
%1(s32) = G_AND %0, %0
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -1408,17 +1408,17 @@
; FAST-LABEL: name: test_and_i64
; FAST: [[DEF:%[0-9]+]]:gpr(s64) = IMPLICIT_DEF
; FAST: [[AND:%[0-9]+]]:gpr(s64) = G_AND [[DEF]], [[DEF]]
- ; FAST: %rax = COPY [[AND]](s64)
- ; FAST: RET 0, implicit %rax
+ ; FAST: $rax = COPY [[AND]](s64)
+ ; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_and_i64
; GREEDY: [[DEF:%[0-9]+]]:gpr(s64) = IMPLICIT_DEF
; GREEDY: [[AND:%[0-9]+]]:gpr(s64) = G_AND [[DEF]], [[DEF]]
- ; GREEDY: %rax = COPY [[AND]](s64)
- ; GREEDY: RET 0, implicit %rax
+ ; GREEDY: $rax = COPY [[AND]](s64)
+ ; GREEDY: RET 0, implicit $rax
%0(s64) = IMPLICIT_DEF
%1(s64) = G_AND %0, %0
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -1432,15 +1432,15 @@
bb.1.entry:
; FAST-LABEL: name: test_global_ptrv
; FAST: [[GV:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @g_int
- ; FAST: %rax = COPY [[GV]](p0)
- ; FAST: RET 0, implicit %rax
+ ; FAST: $rax = COPY [[GV]](p0)
+ ; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_global_ptrv
; GREEDY: [[GV:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @g_int
- ; GREEDY: %rax = COPY [[GV]](p0)
- ; GREEDY: RET 0, implicit %rax
+ ; GREEDY: $rax = COPY [[GV]](p0)
+ ; GREEDY: RET 0, implicit $rax
%0(p0) = G_GLOBAL_VALUE @g_int
- %rax = COPY %0(p0)
- RET 0, implicit %rax
+ $rax = COPY %0(p0)
+ RET 0, implicit $rax
...
---
@@ -1458,15 +1458,15 @@
bb.1 (%ir-block.0):
; FAST-LABEL: name: test_undef
; FAST: [[DEF:%[0-9]+]]:gpr(s8) = G_IMPLICIT_DEF
- ; FAST: %al = COPY [[DEF]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[DEF]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_undef
; GREEDY: [[DEF:%[0-9]+]]:gpr(s8) = G_IMPLICIT_DEF
- ; GREEDY: %al = COPY [[DEF]](s8)
- ; GREEDY: RET 0, implicit %al
+ ; GREEDY: $al = COPY [[DEF]](s8)
+ ; GREEDY: RET 0, implicit $al
%0(s8) = G_IMPLICIT_DEF
- %al = COPY %0(s8)
- RET 0, implicit %al
+ $al = COPY %0(s8)
+ RET 0, implicit $al
...
---
@@ -1484,25 +1484,25 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; FAST-LABEL: name: test_undef2
- ; FAST: [[COPY:%[0-9]+]]:gpr(s8) = COPY %dil
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s8) = COPY $dil
; FAST: [[DEF:%[0-9]+]]:gpr(s8) = G_IMPLICIT_DEF
; FAST: [[ADD:%[0-9]+]]:gpr(s8) = G_ADD [[COPY]], [[DEF]]
- ; FAST: %al = COPY [[ADD]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[ADD]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_undef2
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s8) = COPY %dil
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s8) = COPY $dil
; GREEDY: [[DEF:%[0-9]+]]:gpr(s8) = G_IMPLICIT_DEF
; GREEDY: [[ADD:%[0-9]+]]:gpr(s8) = G_ADD [[COPY]], [[DEF]]
- ; GREEDY: %al = COPY [[ADD]](s8)
- ; GREEDY: RET 0, implicit %al
- %0(s8) = COPY %dil
+ ; GREEDY: $al = COPY [[ADD]](s8)
+ ; GREEDY: RET 0, implicit $al
+ %0(s8) = COPY $dil
%1(s8) = G_IMPLICIT_DEF
%2(s8) = G_ADD %0, %1
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -1520,15 +1520,15 @@
bb.1 (%ir-block.0):
; FAST-LABEL: name: test_undef3
; FAST: [[DEF:%[0-9]+]]:gpr(s32) = G_IMPLICIT_DEF
- ; FAST: %xmm0 = COPY [[DEF]](s32)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[DEF]](s32)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_undef3
; GREEDY: [[DEF:%[0-9]+]]:gpr(s32) = G_IMPLICIT_DEF
- ; GREEDY: %xmm0 = COPY [[DEF]](s32)
- ; GREEDY: RET 0, implicit %xmm0
+ ; GREEDY: $xmm0 = COPY [[DEF]](s32)
+ ; GREEDY: RET 0, implicit $xmm0
%0(s32) = G_IMPLICIT_DEF
- %xmm0 = COPY %0(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %0(s32)
+ RET 0, implicit $xmm0
...
---
@@ -1546,27 +1546,27 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0
+ liveins: $xmm0
; FAST-LABEL: name: test_undef4
- ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
; FAST: [[DEF:%[0-9]+]]:gpr(s32) = G_IMPLICIT_DEF
; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY [[DEF]](s32)
; FAST: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[COPY]], [[COPY1]]
- ; FAST: %xmm0 = COPY [[FADD]](s32)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[FADD]](s32)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_undef4
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
; GREEDY: [[DEF:%[0-9]+]]:gpr(s32) = G_IMPLICIT_DEF
; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY [[DEF]](s32)
; GREEDY: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[COPY]], [[COPY1]]
- ; GREEDY: %xmm0 = COPY [[FADD]](s32)
- ; GREEDY: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
+ ; GREEDY: $xmm0 = COPY [[FADD]](s32)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
%1(s32) = G_IMPLICIT_DEF
%2(s32) = G_FADD %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -1586,10 +1586,10 @@
; FAST-LABEL: name: test_i32
; FAST: bb.0.entry:
; FAST: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; FAST: liveins: %edi, %edx, %esi
- ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
- ; FAST: [[COPY2:%[0-9]+]]:gpr(s32) = COPY %edx
+ ; FAST: liveins: $edi, $edx, $esi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
+ ; FAST: [[COPY2:%[0-9]+]]:gpr(s32) = COPY $edx
; FAST: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; FAST: G_BRCOND [[ICMP]](s1), %bb.1
@@ -1601,15 +1601,15 @@
; FAST: successors: %bb.3(0x80000000)
; FAST: bb.3.cond.end:
; FAST: [[PHI:%[0-9]+]]:gpr(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
- ; FAST: %eax = COPY [[PHI]](s32)
- ; FAST: RET 0, implicit %eax
+ ; FAST: $eax = COPY [[PHI]](s32)
+ ; FAST: RET 0, implicit $eax
; GREEDY-LABEL: name: test_i32
; GREEDY: bb.0.entry:
; GREEDY: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; GREEDY: liveins: %edi, %edx, %esi
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
- ; GREEDY: [[COPY2:%[0-9]+]]:gpr(s32) = COPY %edx
+ ; GREEDY: liveins: $edi, $edx, $esi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
+ ; GREEDY: [[COPY2:%[0-9]+]]:gpr(s32) = COPY $edx
; GREEDY: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; GREEDY: G_BRCOND [[ICMP]](s1), %bb.1
@@ -1621,15 +1621,15 @@
; GREEDY: successors: %bb.3(0x80000000)
; GREEDY: bb.3.cond.end:
; GREEDY: [[PHI:%[0-9]+]]:gpr(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
- ; GREEDY: %eax = COPY [[PHI]](s32)
- ; GREEDY: RET 0, implicit %eax
+ ; GREEDY: $eax = COPY [[PHI]](s32)
+ ; GREEDY: RET 0, implicit $eax
bb.0.entry:
successors: %bb.1(0x40000000), %bb.2(0x40000000)
- liveins: %edi, %edx, %esi
+ liveins: $edi, $edx, $esi
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
- %2(s32) = COPY %edx
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
+ %2(s32) = COPY $edx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.1
@@ -1645,8 +1645,8 @@
bb.3.cond.end:
%5(s32) = G_PHI %1(s32), %bb.1, %2(s32), %bb.2
- %eax = COPY %5(s32)
- RET 0, implicit %eax
+ $eax = COPY %5(s32)
+ RET 0, implicit $eax
...
---
@@ -1666,10 +1666,10 @@
; FAST-LABEL: name: test_float
; FAST: bb.0.entry:
; FAST: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; FAST: liveins: %edi, %xmm0, %xmm1
- ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY %xmm0
- ; FAST: [[COPY2:%[0-9]+]]:vecr(s32) = COPY %xmm1
+ ; FAST: liveins: $edi, $xmm0, $xmm1
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY $xmm0
+ ; FAST: [[COPY2:%[0-9]+]]:vecr(s32) = COPY $xmm1
; FAST: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; FAST: G_BRCOND [[ICMP]](s1), %bb.1
@@ -1681,15 +1681,15 @@
; FAST: successors: %bb.3(0x80000000)
; FAST: bb.3.cond.end:
; FAST: [[PHI:%[0-9]+]]:vecr(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
- ; FAST: %xmm0 = COPY [[PHI]](s32)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[PHI]](s32)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_float
; GREEDY: bb.0.entry:
; GREEDY: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; GREEDY: liveins: %edi, %xmm0, %xmm1
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY %xmm0
- ; GREEDY: [[COPY2:%[0-9]+]]:vecr(s32) = COPY %xmm1
+ ; GREEDY: liveins: $edi, $xmm0, $xmm1
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY $xmm0
+ ; GREEDY: [[COPY2:%[0-9]+]]:vecr(s32) = COPY $xmm1
; GREEDY: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; GREEDY: G_BRCOND [[ICMP]](s1), %bb.1
@@ -1701,15 +1701,15 @@
; GREEDY: successors: %bb.3(0x80000000)
; GREEDY: bb.3.cond.end:
; GREEDY: [[PHI:%[0-9]+]]:vecr(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
- ; GREEDY: %xmm0 = COPY [[PHI]](s32)
- ; GREEDY: RET 0, implicit %xmm0
+ ; GREEDY: $xmm0 = COPY [[PHI]](s32)
+ ; GREEDY: RET 0, implicit $xmm0
bb.0.entry:
successors: %bb.1(0x40000000), %bb.2(0x40000000)
- liveins: %edi, %xmm0, %xmm1
+ liveins: $edi, $xmm0, $xmm1
- %0(s32) = COPY %edi
- %1(s32) = COPY %xmm0
- %2(s32) = COPY %xmm1
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $xmm0
+ %2(s32) = COPY $xmm1
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.1
@@ -1725,8 +1725,8 @@
bb.3.cond.end:
%5(s32) = G_PHI %1(s32), %bb.1, %2(s32), %bb.2
- %xmm0 = COPY %5(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %5(s32)
+ RET 0, implicit $xmm0
...
---
@@ -1739,22 +1739,22 @@
- { id: 1, class: _, preferred-register: '' }
body: |
bb.1.entry:
- liveins: %xmm0
+ liveins: $xmm0
; FAST-LABEL: name: test_fpext
- ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
; FAST: [[FPEXT:%[0-9]+]]:vecr(s64) = G_FPEXT [[COPY]](s32)
- ; FAST: %xmm0 = COPY [[FPEXT]](s64)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[FPEXT]](s64)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_fpext
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
; GREEDY: [[FPEXT:%[0-9]+]]:vecr(s64) = G_FPEXT [[COPY]](s32)
- ; GREEDY: %xmm0 = COPY [[FPEXT]](s64)
- ; GREEDY: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
+ ; GREEDY: $xmm0 = COPY [[FPEXT]](s64)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
%1(s64) = G_FPEXT %0(s32)
- %xmm0 = COPY %1(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s64)
+ RET 0, implicit $xmm0
...
---
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-GV.mir b/llvm/test/CodeGen/X86/GlobalISel/select-GV.mir
index 0248ca2..b1e07a2 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-GV.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-GV.mir
@@ -41,27 +41,27 @@
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
# X64: %0:gr64 = IMPLICIT_DEF
-# X64-NEXT: %1:gr64 = LEA64r %noreg, 1, %noreg, @g_int, %noreg
-# X64-NEXT: MOV64mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into `i32** undef`)
+# X64-NEXT: %1:gr64 = LEA64r $noreg, 1, $noreg, @g_int, $noreg
+# X64-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`)
# X64-NEXT: RET 0
#
# X64_DARWIN_PIC: %0:gr64 = IMPLICIT_DEF
-# X64_DARWIN_PIC-NEXT: %1:gr64 = LEA64r %rip, 1, %noreg, @g_int, %noreg
-# X64_DARWIN_PIC-NEXT: MOV64mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into `i32** undef`)
+# X64_DARWIN_PIC-NEXT: %1:gr64 = LEA64r $rip, 1, $noreg, @g_int, $noreg
+# X64_DARWIN_PIC-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`)
# X64_DARWIN_PIC-NEXT: RET 0
#
# X32: %0:gr32 = IMPLICIT_DEF
-# X32-NEXT: %1:gr32 = LEA32r %noreg, 1, %noreg, @g_int, %noreg
-# X32-NEXT: MOV32mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into `i32** undef`)
+# X32-NEXT: %1:gr32 = LEA32r $noreg, 1, $noreg, @g_int, $noreg
+# X32-NEXT: MOV32mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`)
# X32-NEXT: RET 0
#
# X32ABI: %0:low32_addr_access = IMPLICIT_DEF
-# X32ABI-NEXT: %1:gr32 = LEA64_32r %noreg, 1, %noreg, @g_int, %noreg
-# X32ABI-NEXT: MOV32mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into `i32** undef`)
+# X32ABI-NEXT: %1:gr32 = LEA64_32r $noreg, 1, $noreg, @g_int, $noreg
+# X32ABI-NEXT: MOV32mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`)
# X32ABI-NEXT: RET 0
body: |
bb.1.entry:
- liveins: %rdi
+ liveins: $rdi
%0(p0) = IMPLICIT_DEF
%1(p0) = G_GLOBAL_VALUE @g_int
@@ -85,30 +85,30 @@
registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
-# X64: %1:gr64 = LEA64r %noreg, 1, %noreg, @g_int, %noreg
-# X64-NEXT: %0:gr32 = MOV32rm %1, 1, %noreg, 0, %noreg :: (load 4 from @g_int)
-# X64-NEXT: %eax = COPY %0
-# X64-NEXT: RET 0, implicit %eax
+# X64: %1:gr64 = LEA64r $noreg, 1, $noreg, @g_int, $noreg
+# X64-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int)
+# X64-NEXT: $eax = COPY %0
+# X64-NEXT: RET 0, implicit $eax
#
-# X64_DARWIN_PIC: %1:gr64 = LEA64r %rip, 1, %noreg, @g_int, %noreg
-# X64_DARWIN_PIC-NEXT: %0:gr32 = MOV32rm %1, 1, %noreg, 0, %noreg :: (load 4 from @g_int)
-# X64_DARWIN_PIC-NEXT: %eax = COPY %0
-# X64_DARWIN_PIC-NEXT: RET 0, implicit %eax
+# X64_DARWIN_PIC: %1:gr64 = LEA64r $rip, 1, $noreg, @g_int, $noreg
+# X64_DARWIN_PIC-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int)
+# X64_DARWIN_PIC-NEXT: $eax = COPY %0
+# X64_DARWIN_PIC-NEXT: RET 0, implicit $eax
#
-# X32: %1:gr32 = LEA32r %noreg, 1, %noreg, @g_int, %noreg
-# X32-NEXT: %0:gr32 = MOV32rm %1, 1, %noreg, 0, %noreg :: (load 4 from @g_int)
-# X32-NEXT: %eax = COPY %0
-# X32-NEXT: RET 0, implicit %eax
+# X32: %1:gr32 = LEA32r $noreg, 1, $noreg, @g_int, $noreg
+# X32-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int)
+# X32-NEXT: $eax = COPY %0
+# X32-NEXT: RET 0, implicit $eax
#
-# X32ABI: %1:gr32 = LEA64_32r %noreg, 1, %noreg, @g_int, %noreg
-# X32ABI-NEXT: %0:gr32 = MOV32rm %1, 1, %noreg, 0, %noreg :: (load 4 from @g_int)
-# X32ABI-NEXT: %eax = COPY %0
-# X32ABI-NEXT: RET 0, implicit %eax
+# X32ABI: %1:gr32 = LEA64_32r $noreg, 1, $noreg, @g_int, $noreg
+# X32ABI-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int)
+# X32ABI-NEXT: $eax = COPY %0
+# X32ABI-NEXT: RET 0, implicit $eax
body: |
bb.1.entry:
%1(p0) = G_GLOBAL_VALUE @g_int
%0(s32) = G_LOAD %1(p0) :: (load 4 from @g_int)
- %eax = COPY %0(s32)
- RET 0, implicit %eax
+ $eax = COPY %0(s32)
+ RET 0, implicit $eax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-add-v128.mir b/llvm/test/CodeGen/X86/GlobalISel/select-add-v128.mir
index 7a2f606..5aa2f19 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-add-v128.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-add-v128.mir
@@ -58,13 +58,13 @@
# AVX512BWVL: %2:vr128x = VPADDBZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<16 x s8>) = COPY %xmm0
- %1(<16 x s8>) = COPY %xmm1
+ %0(<16 x s8>) = COPY $xmm0
+ %1(<16 x s8>) = COPY $xmm1
%2(<16 x s8>) = G_ADD %0, %1
- %xmm0 = COPY %2(<16 x s8>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<16 x s8>)
+ RET 0, implicit $xmm0
...
---
@@ -100,13 +100,13 @@
# AVX512BWVL: %2:vr128x = VPADDWZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<8 x s16>) = COPY %xmm0
- %1(<8 x s16>) = COPY %xmm1
+ %0(<8 x s16>) = COPY $xmm0
+ %1(<8 x s16>) = COPY $xmm1
%2(<8 x s16>) = G_ADD %0, %1
- %xmm0 = COPY %2(<8 x s16>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit $xmm0
...
---
@@ -142,13 +142,13 @@
# AVX512BWVL: %2:vr128x = VPADDDZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_ADD %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -184,12 +184,12 @@
# AVX512BWVL: %2:vr128x = VPADDQZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<2 x s64>) = COPY %xmm0
- %1(<2 x s64>) = COPY %xmm1
+ %0(<2 x s64>) = COPY $xmm0
+ %1(<2 x s64>) = COPY $xmm1
%2(<2 x s64>) = G_ADD %0, %1
- %xmm0 = COPY %2(<2 x s64>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<2 x s64>)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-add-v256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-add-v256.mir
index 8a98a6d..3e307bb 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-add-v256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-add-v256.mir
@@ -54,13 +54,13 @@
# AVX512BWVL: %2:vr256x = VPADDBZ256rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<32 x s8>) = COPY %ymm0
- %1(<32 x s8>) = COPY %ymm1
+ %0(<32 x s8>) = COPY $ymm0
+ %1(<32 x s8>) = COPY $ymm1
%2(<32 x s8>) = G_ADD %0, %1
- %ymm0 = COPY %2(<32 x s8>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<32 x s8>)
+ RET 0, implicit $ymm0
...
---
@@ -94,13 +94,13 @@
# AVX512BWVL: %2:vr256x = VPADDWZ256rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<16 x s16>) = COPY %ymm0
- %1(<16 x s16>) = COPY %ymm1
+ %0(<16 x s16>) = COPY $ymm0
+ %1(<16 x s16>) = COPY $ymm1
%2(<16 x s16>) = G_ADD %0, %1
- %ymm0 = COPY %2(<16 x s16>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<16 x s16>)
+ RET 0, implicit $ymm0
...
---
@@ -134,13 +134,13 @@
# AVX512BWVL: %2:vr256x = VPADDDZ256rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<8 x s32>) = COPY %ymm0
- %1(<8 x s32>) = COPY %ymm1
+ %0(<8 x s32>) = COPY $ymm0
+ %1(<8 x s32>) = COPY $ymm1
%2(<8 x s32>) = G_ADD %0, %1
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -174,12 +174,12 @@
# AVX512BWVL: %2:vr256x = VPADDQZ256rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<4 x s64>) = COPY %ymm0
- %1(<4 x s64>) = COPY %ymm1
+ %0(<4 x s64>) = COPY $ymm0
+ %1(<4 x s64>) = COPY $ymm1
%2(<4 x s64>) = G_ADD %0, %1
- %ymm0 = COPY %2(<4 x s64>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<4 x s64>)
+ RET 0, implicit $ymm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-add-v512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-add-v512.mir
index 392d22c..e795b71 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-add-v512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-add-v512.mir
@@ -36,19 +36,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_add_v64i8
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; ALL: [[VPADDBZrr:%[0-9]+]]:vr512 = VPADDBZrr [[COPY]], [[COPY1]]
- ; ALL: %zmm0 = COPY [[VPADDBZrr]]
- ; ALL: RET 0, implicit %zmm0
- %0(<64 x s8>) = COPY %zmm0
- %1(<64 x s8>) = COPY %zmm1
+ ; ALL: $zmm0 = COPY [[VPADDBZrr]]
+ ; ALL: RET 0, implicit $zmm0
+ %0(<64 x s8>) = COPY $zmm0
+ %1(<64 x s8>) = COPY $zmm1
%2(<64 x s8>) = G_ADD %0, %1
- %zmm0 = COPY %2(<64 x s8>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<64 x s8>)
+ RET 0, implicit $zmm0
...
---
@@ -62,19 +62,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_add_v32i16
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; ALL: [[VPADDWZrr:%[0-9]+]]:vr512 = VPADDWZrr [[COPY]], [[COPY1]]
- ; ALL: %zmm0 = COPY [[VPADDWZrr]]
- ; ALL: RET 0, implicit %zmm0
- %0(<32 x s16>) = COPY %zmm0
- %1(<32 x s16>) = COPY %zmm1
+ ; ALL: $zmm0 = COPY [[VPADDWZrr]]
+ ; ALL: RET 0, implicit $zmm0
+ %0(<32 x s16>) = COPY $zmm0
+ %1(<32 x s16>) = COPY $zmm1
%2(<32 x s16>) = G_ADD %0, %1
- %zmm0 = COPY %2(<32 x s16>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<32 x s16>)
+ RET 0, implicit $zmm0
...
---
@@ -88,19 +88,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_add_v16i32
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; ALL: [[VPADDDZrr:%[0-9]+]]:vr512 = VPADDDZrr [[COPY]], [[COPY1]]
- ; ALL: %zmm0 = COPY [[VPADDDZrr]]
- ; ALL: RET 0, implicit %zmm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<16 x s32>) = COPY %zmm1
+ ; ALL: $zmm0 = COPY [[VPADDDZrr]]
+ ; ALL: RET 0, implicit $zmm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<16 x s32>) = COPY $zmm1
%2(<16 x s32>) = G_ADD %0, %1
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $zmm0
...
---
@@ -114,18 +114,18 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_add_v8i64
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; ALL: [[VPADDQZrr:%[0-9]+]]:vr512 = VPADDQZrr [[COPY]], [[COPY1]]
- ; ALL: %zmm0 = COPY [[VPADDQZrr]]
- ; ALL: RET 0, implicit %zmm0
- %0(<8 x s64>) = COPY %zmm0
- %1(<8 x s64>) = COPY %zmm1
+ ; ALL: $zmm0 = COPY [[VPADDQZrr]]
+ ; ALL: RET 0, implicit $zmm0
+ %0(<8 x s64>) = COPY $zmm0
+ %1(<8 x s64>) = COPY $zmm1
%2(<8 x s64>) = G_ADD %0, %1
- %zmm0 = COPY %2(<8 x s64>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<8 x s64>)
+ RET 0, implicit $zmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-add-x32.mir b/llvm/test/CodeGen/X86/GlobalISel/select-add-x32.mir
index 4f04bc5..4e743df 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-add-x32.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-add-x32.mir
@@ -30,14 +30,14 @@
; X32: [[DEF1:%[0-9]+]]:gr32 = IMPLICIT_DEF
; X32: [[DEF2:%[0-9]+]]:gr32 = IMPLICIT_DEF
; X32: [[DEF3:%[0-9]+]]:gr32 = IMPLICIT_DEF
- ; X32: [[ADD32rr:%[0-9]+]]:gr32 = ADD32rr [[DEF]], [[DEF2]], implicit-def %eflags
- ; X32: [[COPY:%[0-9]+]]:gr32 = COPY %eflags
- ; X32: %eflags = COPY [[COPY]]
- ; X32: [[ADC32rr:%[0-9]+]]:gr32 = ADC32rr [[DEF1]], [[DEF3]], implicit-def %eflags, implicit %eflags
- ; X32: [[COPY1:%[0-9]+]]:gr32 = COPY %eflags
- ; X32: %eax = COPY [[ADD32rr]]
- ; X32: %edx = COPY [[ADC32rr]]
- ; X32: RET 0, implicit %eax, implicit %edx
+ ; X32: [[ADD32rr:%[0-9]+]]:gr32 = ADD32rr [[DEF]], [[DEF2]], implicit-def $eflags
+ ; X32: [[COPY:%[0-9]+]]:gr32 = COPY $eflags
+ ; X32: $eflags = COPY [[COPY]]
+ ; X32: [[ADC32rr:%[0-9]+]]:gr32 = ADC32rr [[DEF1]], [[DEF3]], implicit-def $eflags, implicit $eflags
+ ; X32: [[COPY1:%[0-9]+]]:gr32 = COPY $eflags
+ ; X32: $eax = COPY [[ADD32rr]]
+ ; X32: $edx = COPY [[ADC32rr]]
+ ; X32: RET 0, implicit $eax, implicit $edx
%0(s32) = IMPLICIT_DEF
%1(s32) = IMPLICIT_DEF
%2(s32) = IMPLICIT_DEF
@@ -46,8 +46,8 @@
%4(s1) = G_TRUNC %9(s8)
%5(s32), %6(s1) = G_UADDE %0, %2, %4
%7(s32), %8(s1) = G_UADDE %1, %3, %6
- %eax = COPY %5(s32)
- %edx = COPY %7(s32)
- RET 0, implicit %eax, implicit %edx
+ $eax = COPY %5(s32)
+ $edx = COPY %7(s32)
+ RET 0, implicit $eax, implicit $edx
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-add.mir b/llvm/test/CodeGen/X86/GlobalISel/select-add.mir
index 1f42fd4..4b86305 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-add.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-add.mir
@@ -44,17 +44,17 @@
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# ALL: %0:gr64 = COPY %rdi
-# ALL-NEXT: %1:gr64 = COPY %rsi
+# ALL: %0:gr64 = COPY $rdi
+# ALL-NEXT: %1:gr64 = COPY $rsi
# ALL-NEXT: %2:gr64 = ADD64rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s64) = G_ADD %0, %1
- %rax = COPY %2(s64)
+ $rax = COPY %2(s64)
...
@@ -67,17 +67,17 @@
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# ALL: %0:gr32 = COPY %edi
-# ALL-NEXT: %1:gr32 = COPY %esi
+# ALL: %0:gr32 = COPY $edi
+# ALL-NEXT: %1:gr32 = COPY $esi
# ALL-NEXT: %2:gr32 = ADD32rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_ADD %0, %1
- %eax = COPY %2(s32)
+ $eax = COPY %2(s32)
...
---
@@ -91,18 +91,18 @@
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# ALL: %0:gr16 = COPY %di
-# ALL: %1:gr16 = COPY %si
-# ALL: %2:gr16 = ADD16rr %0, %1, implicit-def %eflags
+# ALL: %0:gr16 = COPY $di
+# ALL: %1:gr16 = COPY $si
+# ALL: %2:gr16 = ADD16rr %0, %1, implicit-def $eflags
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s16) = G_ADD %0, %1
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -116,18 +116,18 @@
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# ALL: %0:gr8 = COPY %dil
-# ALL: %1:gr8 = COPY %sil
-# ALL: %2:gr8 = ADD8rr %0, %1, implicit-def %eflags
+# ALL: %0:gr8 = COPY $dil
+# ALL: %1:gr8 = COPY $sil
+# ALL: %2:gr8 = ADD8rr %0, %1, implicit-def $eflags
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
- %0(s8) = COPY %dil
- %1(s8) = COPY %sil
+ %0(s8) = COPY $dil
+ %1(s8) = COPY $sil
%2(s8) = G_ADD %0, %1
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -142,23 +142,23 @@
- { id: 0, class: vecr }
- { id: 1, class: vecr }
- { id: 2, class: vecr }
-# NO_AVX512VL: %0:vr128 = COPY %xmm0
-# NO_AVX512VL: %1:vr128 = COPY %xmm1
+# NO_AVX512VL: %0:vr128 = COPY $xmm0
+# NO_AVX512VL: %1:vr128 = COPY $xmm1
# SSE-NEXT: %2:vr128 = PADDDrr %0, %1
# AVX-NEXT: %2:vr128 = VPADDDrr %0, %1
# AVX512F-NEXT: %2:vr128 = VPADDDrr %0, %1
-# AVX512VL: %0:vr128x = COPY %xmm0
-# AVX512VL: %1:vr128x = COPY %xmm1
+# AVX512VL: %0:vr128x = COPY $xmm0
+# AVX512VL: %1:vr128x = COPY $xmm1
# AVX512VL-NEXT: %2:vr128x = VPADDDZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_ADD %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -173,26 +173,26 @@
- { id: 0, class: vecr }
- { id: 1, class: vecr }
- { id: 2, class: vecr }
-# SSE: %0:vr128 = COPY %xmm0
-# SSE-NEXT: %1:vr128 = COPY %xmm1
+# SSE: %0:vr128 = COPY $xmm0
+# SSE-NEXT: %1:vr128 = COPY $xmm1
# SSE-NEXT: %2:vr128 = ADDPSrr %0, %1
-# AVX: %0:vr128 = COPY %xmm0
-# AVX-NEXT: %1:vr128 = COPY %xmm1
+# AVX: %0:vr128 = COPY $xmm0
+# AVX-NEXT: %1:vr128 = COPY $xmm1
# AVX-NEXT: %2:vr128 = VADDPSrr %0, %1
-# AVX512F: %0:vr128 = COPY %xmm0
-# AVX512F-NEXT: 1:vr128 = COPY %xmm1
+# AVX512F: %0:vr128 = COPY $xmm0
+# AVX512F-NEXT: %1:vr128 = COPY $xmm1
# AVX512F-NEXT: %2:vr128 = VADDPSrr %0, %1
-# AVX512VL: %0:vr128x = COPY %xmm0
-# AVX512VL-NEXT: %1:vr128x = COPY %xmm1
+# AVX512VL: %0:vr128x = COPY $xmm0
+# AVX512VL-NEXT: %1:vr128x = COPY $xmm1
# AVX512VL-NEXT: %2:vr128x = VADDPSZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_FADD %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-and-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-and-scalar.mir
index 0ecb881..a7bf99a 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-and-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-and-scalar.mir
@@ -38,19 +38,19 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_and_i8
- ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil
- ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY %sil
- ; ALL: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %al = COPY [[AND8rr]]
- ; ALL: RET 0, implicit %al
- %0(s8) = COPY %dil
- %1(s8) = COPY %sil
+ ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil
+ ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY $sil
+ ; ALL: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $al = COPY [[AND8rr]]
+ ; ALL: RET 0, implicit $al
+ %0(s8) = COPY $dil
+ %1(s8) = COPY $sil
%2(s8) = G_AND %0, %1
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -68,19 +68,19 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_and_i16
- ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY %di
- ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY %si
- ; ALL: [[AND16rr:%[0-9]+]]:gr16 = AND16rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %ax = COPY [[AND16rr]]
- ; ALL: RET 0, implicit %ax
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY $di
+ ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY $si
+ ; ALL: [[AND16rr:%[0-9]+]]:gr16 = AND16rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $ax = COPY [[AND16rr]]
+ ; ALL: RET 0, implicit $ax
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s16) = G_AND %0, %1
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -98,19 +98,19 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_and_i32
- ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; ALL: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %eax = COPY [[AND32rr]]
- ; ALL: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; ALL: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $eax = COPY [[AND32rr]]
+ ; ALL: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_AND %0, %1
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -128,18 +128,18 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; ALL-LABEL: name: test_and_i64
- ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; ALL: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %rax = COPY [[AND64rr]]
- ; ALL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; ALL: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $rax = COPY [[AND64rr]]
+ ; ALL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s64) = G_AND %0, %1
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-blsi.mir b/llvm/test/CodeGen/X86/GlobalISel/select-blsi.mir
index 0247883..35fe24e 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-blsi.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-blsi.mir
@@ -21,17 +21,17 @@
# G_SUB and G_AND both use %0 so we should match this.
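# For reference, BLSI32rr is the BMI1 "extract lowest set bit" instruction,
# roughly x & -x: e.g. for x = 0b0110, -x = ...1010, so x & -x = 0b0010.
# The pattern only applies when the G_SUB result and the same source
# register feed the G_AND.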
body: |
bb.1:
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_blsi32rr
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[BLSI32rr:%[0-9]+]]:gr32 = BLSI32rr [[COPY]], implicit-def %eflags
- ; CHECK: %edi = COPY [[BLSI32rr]]
- %0(s32) = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[BLSI32rr:%[0-9]+]]:gr32 = BLSI32rr [[COPY]], implicit-def $eflags
+ ; CHECK: $edi = COPY [[BLSI32rr]]
+ %0(s32) = COPY $edi
%1(s32) = G_CONSTANT i32 0
%2(s32) = G_SUB %1, %0
%3(s32) = G_AND %2, %0
- %edi = COPY %3
+ $edi = COPY %3
...
---
@@ -47,17 +47,17 @@
# G_SUB and G_AND use different operands so we shouldn't match this.
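# Here the G_SUB negates %1 (a constant zero) rather than %0, so the inputs
# no longer form x & -x and selection falls back to the generic
# MOV32r0 / SUB32ri / AND32rr sequence checked below.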
body: |
bb.1:
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_blsi32rr_nomatch
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags
- ; CHECK: [[SUB32ri:%[0-9]+]]:gr32 = SUB32ri [[MOV32r0_]], 0, implicit-def %eflags
- ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[SUB32ri]], [[COPY]], implicit-def %eflags
- ; CHECK: %edi = COPY [[AND32rr]]
- %0(s32) = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+ ; CHECK: [[SUB32ri:%[0-9]+]]:gr32 = SUB32ri [[MOV32r0_]], 0, implicit-def $eflags
+ ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[SUB32ri]], [[COPY]], implicit-def $eflags
+ ; CHECK: $edi = COPY [[AND32rr]]
+ %0(s32) = COPY $edi
%1(s32) = G_CONSTANT i32 0
%2(s32) = G_SUB %1, %1
%3(s32) = G_AND %2, %0
- %edi = COPY %3
+ $edi = COPY %3
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-blsr.mir b/llvm/test/CodeGen/X86/GlobalISel/select-blsr.mir
index 95c6cfd..070a4c6 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-blsr.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-blsr.mir
@@ -18,17 +18,17 @@
# G_ADD and G_AND both use %0 so we should match this.
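# For reference, BLSR32rr is the BMI1 "reset lowest set bit" instruction,
# roughly x & (x - 1): e.g. for x = 0b0110, x - 1 = 0b0101, so the result
# is 0b0100. The pattern only applies when the G_ADD result and the same
# source register feed the G_AND.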
body: |
bb.1:
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_blsr32rr
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[BLSR32rr:%[0-9]+]]:gr32 = BLSR32rr [[COPY]], implicit-def %eflags
- ; CHECK: %edi = COPY [[BLSR32rr]]
- %0(s32) = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[BLSR32rr:%[0-9]+]]:gr32 = BLSR32rr [[COPY]], implicit-def $eflags
+ ; CHECK: $edi = COPY [[BLSR32rr]]
+ %0(s32) = COPY $edi
%1(s32) = G_CONSTANT i32 -1
%2(s32) = G_ADD %0, %1
%3(s32) = G_AND %2, %0
- %edi = COPY %3
+ $edi = COPY %3
...
---
@@ -44,17 +44,17 @@
# G_ADD and G_AND use different operands so we shouldn't match this.
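# Here the G_ADD decrements the constant -1 rather than %0, so the inputs
# no longer form x & (x - 1) and selection falls back to the
# MOV32ri / DEC32r / AND32rr sequence checked below.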
body: |
bb.1:
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_blsr32rr_nomatch
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; CHECK: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 4294967295
- ; CHECK: [[DEC32r:%[0-9]+]]:gr32 = DEC32r [[MOV32ri]], implicit-def %eflags
- ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[DEC32r]], [[COPY]], implicit-def %eflags
- ; CHECK: %edi = COPY [[AND32rr]]
- %0(s32) = COPY %edi
+ ; CHECK: [[DEC32r:%[0-9]+]]:gr32 = DEC32r [[MOV32ri]], implicit-def $eflags
+ ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[DEC32r]], [[COPY]], implicit-def $eflags
+ ; CHECK: $edi = COPY [[AND32rr]]
+ %0(s32) = COPY $edi
%1(s32) = G_CONSTANT i32 -1
%2(s32) = G_ADD %1, %1
%3(s32) = G_AND %2, %0
- %edi = COPY %3
+ $edi = COPY %3
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-brcond.mir b/llvm/test/CodeGen/X86/GlobalISel/select-brcond.mir
index 00a9cc0..bc0ae25 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-brcond.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-brcond.mir
@@ -27,28 +27,28 @@
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
- { id: 3, class: gpr, preferred-register: '' }
-# X64: %0:gr32 = COPY %edi
-# X32: %0:gr32_abcd = COPY %edi
-# CHECK-NEXT: %2:gr32 = MOV32r0 implicit-def %eflags
+# X64: %0:gr32 = COPY $edi
+# X32: %0:gr32_abcd = COPY $edi
+# CHECK-NEXT: %2:gr32 = MOV32r0 implicit-def $eflags
# CHECK-NEXT: %3:gr32 = MOV32ri 1
# CHECK-NEXT: %1:gr8 = COPY %0.sub_8bit
-# CHECK-NEXT: TEST8ri %1, 1, implicit-def %eflags
-# CHECK-NEXT: JNE_1 %[[TRUE:bb.[0-9]+]], implicit %eflags
+# CHECK-NEXT: TEST8ri %1, 1, implicit-def $eflags
+# CHECK-NEXT: JNE_1 %[[TRUE:bb.[0-9]+]], implicit $eflags
# CHECK-NEXT: JMP_1 %[[FALSE:bb.[0-9]+]]
# CHECK: [[TRUE]].{{[a-zA-Z0-9]+}}:
-# CHECK-NEXT: %eax = COPY %2
-# CHECK-NEXT: RET 0, implicit %eax
+# CHECK-NEXT: $eax = COPY %2
+# CHECK-NEXT: RET 0, implicit $eax
# CHECK: [[FALSE]].{{[a-zA-Z0-9]+}}:
-# CHECK-NEXT: %eax = COPY %3
-# CHECK-NEXT: RET 0, implicit %eax
+# CHECK-NEXT: $eax = COPY %3
+# CHECK-NEXT: RET 0, implicit $eax
body: |
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%2(s32) = G_CONSTANT i32 0
%3(s32) = G_CONSTANT i32 1
%1(s1) = G_TRUNC %0(s32)
@@ -56,11 +56,11 @@
G_BR %bb.3
bb.2.true:
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
bb.3.false:
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-cmp.mir b/llvm/test/CodeGen/X86/GlobalISel/select-cmp.mir
index 3457e97..1d5ac90 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-cmp.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-cmp.mir
@@ -93,23 +93,23 @@
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_eq_i8
- ; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY %dil
- ; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY %sil
- ; CHECK: CMP8rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY $dil
+ ; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY $sil
+ ; CHECK: CMP8rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s8) = COPY %dil
- %1(s8) = COPY %sil
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s8) = COPY $dil
+ %1(s8) = COPY $sil
%2(s1) = G_ICMP intpred(eq), %0(s8), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -124,23 +124,23 @@
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_eq_i16
- ; CHECK: [[COPY:%[0-9]+]]:gr16 = COPY %di
- ; CHECK: [[COPY1:%[0-9]+]]:gr16 = COPY %si
- ; CHECK: CMP16rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr16 = COPY $di
+ ; CHECK: [[COPY1:%[0-9]+]]:gr16 = COPY $si
+ ; CHECK: CMP16rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s1) = G_ICMP intpred(eq), %0(s16), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -155,23 +155,23 @@
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; CHECK-LABEL: name: test_icmp_eq_i64
- ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; CHECK: CMP64rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; CHECK: CMP64rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s1) = G_ICMP intpred(eq), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -186,23 +186,23 @@
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_eq_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(eq), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -217,23 +217,23 @@
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_ne_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETNEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(ne), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -248,23 +248,23 @@
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_ugt_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETAr:%[0-9]+]]:gr8 = SETAr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETAr:%[0-9]+]]:gr8 = SETAr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETAr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(ugt), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -279,23 +279,23 @@
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_uge_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETAEr:%[0-9]+]]:gr8 = SETAEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETAEr:%[0-9]+]]:gr8 = SETAEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETAEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(uge), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -310,23 +310,23 @@
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_ult_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETBr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(ult), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -341,23 +341,23 @@
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_ule_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETBEr:%[0-9]+]]:gr8 = SETBEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETBEr:%[0-9]+]]:gr8 = SETBEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETBEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(ule), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -372,23 +372,23 @@
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_sgt_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETGr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(sgt), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -403,23 +403,23 @@
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_sge_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETGEr:%[0-9]+]]:gr8 = SETGEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETGEr:%[0-9]+]]:gr8 = SETGEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETGEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(sge), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -434,23 +434,23 @@
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_slt_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETLr:%[0-9]+]]:gr8 = SETLr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETLr:%[0-9]+]]:gr8 = SETLr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETLr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(slt), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -465,22 +465,22 @@
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_sle_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETLEr:%[0-9]+]]:gr8 = SETLEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETLEr:%[0-9]+]]:gr8 = SETLEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETLEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(sle), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-constant.mir b/llvm/test/CodeGen/X86/GlobalISel/select-constant.mir
index b083288..d4666bb 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-constant.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-constant.mir
@@ -47,11 +47,11 @@
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i8
; CHECK: [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 2
- ; CHECK: %al = COPY [[MOV8ri]]
- ; CHECK: RET 0, implicit %al
+ ; CHECK: $al = COPY [[MOV8ri]]
+ ; CHECK: RET 0, implicit $al
%0(s8) = G_CONSTANT i8 2
- %al = COPY %0(s8)
- RET 0, implicit %al
+ $al = COPY %0(s8)
+ RET 0, implicit $al
...
---
@@ -65,11 +65,11 @@
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i16
; CHECK: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 3
- ; CHECK: %ax = COPY [[MOV16ri]]
- ; CHECK: RET 0, implicit %ax
+ ; CHECK: $ax = COPY [[MOV16ri]]
+ ; CHECK: RET 0, implicit $ax
%0(s16) = G_CONSTANT i16 3
- %ax = COPY %0(s16)
- RET 0, implicit %ax
+ $ax = COPY %0(s16)
+ RET 0, implicit $ax
...
---
@@ -83,11 +83,11 @@
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i32
; CHECK: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 4
- ; CHECK: %eax = COPY [[MOV32ri]]
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: $eax = COPY [[MOV32ri]]
+ ; CHECK: RET 0, implicit $eax
%0(s32) = G_CONSTANT i32 4
- %eax = COPY %0(s32)
- RET 0, implicit %eax
+ $eax = COPY %0(s32)
+ RET 0, implicit $eax
...
---
@@ -99,12 +99,12 @@
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i32_0
- ; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags
- ; CHECK: %eax = COPY [[MOV32r0_]]
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+ ; CHECK: $eax = COPY [[MOV32r0_]]
+ ; CHECK: RET 0, implicit $eax
%0(s32) = G_CONSTANT i32 0
- %eax = COPY %0(s32)
- RET 0, implicit %eax
+ $eax = COPY %0(s32)
+ RET 0, implicit $eax
...
---
@@ -118,11 +118,11 @@
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i64
; CHECK: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri 68719476720
- ; CHECK: %rax = COPY [[MOV64ri]]
- ; CHECK: RET 0, implicit %rax
+ ; CHECK: $rax = COPY [[MOV64ri]]
+ ; CHECK: RET 0, implicit $rax
%0(s64) = G_CONSTANT i64 68719476720
- %rax = COPY %0(s64)
- RET 0, implicit %rax
+ $rax = COPY %0(s64)
+ RET 0, implicit $rax
...
---
@@ -137,11 +137,11 @@
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i64_u32
; CHECK: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 1879048192
- ; CHECK: %rax = COPY [[MOV64ri32_]]
- ; CHECK: RET 0, implicit %rax
+ ; CHECK: $rax = COPY [[MOV64ri32_]]
+ ; CHECK: RET 0, implicit $rax
%0(s64) = G_CONSTANT i64 1879048192
- %rax = COPY %0(s64)
- RET 0, implicit %rax
+ $rax = COPY %0(s64)
+ RET 0, implicit $rax
...
---
@@ -155,11 +155,11 @@
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i64_i32
; CHECK: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 -1
- ; CHECK: %rax = COPY [[MOV64ri32_]]
- ; CHECK: RET 0, implicit %rax
+ ; CHECK: $rax = COPY [[MOV64ri32_]]
+ ; CHECK: RET 0, implicit $rax
%0(s64) = G_CONSTANT i64 -1
- %rax = COPY %0(s64)
- RET 0, implicit %rax
+ $rax = COPY %0(s64)
+ RET 0, implicit $rax
...
---
@@ -172,14 +172,14 @@
- { id: 1, class: gpr, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; CHECK-LABEL: name: main
- ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; CHECK: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 0
- ; CHECK: MOV64mr [[COPY]], 1, %noreg, 0, %noreg, [[MOV64ri32_]] :: (store 8 into %ir.data)
+ ; CHECK: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[MOV64ri32_]] :: (store 8 into %ir.data)
; CHECK: RET 0
- %0(p0) = COPY %rdi
+ %0(p0) = COPY $rdi
%1(p0) = G_CONSTANT i64 0
G_STORE %1(p0), %0(p0) :: (store 8 into %ir.data)
RET 0
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-copy.mir b/llvm/test/CodeGen/X86/GlobalISel/select-copy.mir
index 90e2c9f..e0ddf57 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-copy.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-copy.mir
@@ -40,18 +40,18 @@
registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
-# ALL %0:gr8 = COPY %al
+# ALL %0:gr8 = COPY $al
# ALL-NEXT %1:gr32 = MOVZX32rr8 %0
-# ALL-NEXT %eax = COPY %1
-# ALL-NEXT RET 0, implicit %eax
+# ALL-NEXT $eax = COPY %1
+# ALL-NEXT RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %eax
+ liveins: $eax
- %0(s8) = COPY %al
+ %0(s8) = COPY $al
%1(s32) = G_ZEXT %0(s8)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -66,18 +66,18 @@
registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
-# ALL: %0:gr8 = COPY %al
+# ALL: %0:gr8 = COPY $al
# ALL-NEXT: %1:gr32 = MOVZX32rr8 %0
-# ALL-NEXT: %eax = COPY %1
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %1
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %eax
+ liveins: $eax
- %0(s8) = COPY %al
+ %0(s8) = COPY $al
%1(s32) = G_ZEXT %0(s8)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -94,20 +94,20 @@
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
-# ALL %0:gr16 = COPY %ax
+# ALL %0:gr16 = COPY $ax
# ALL-NEXT %1:gr8 = COPY %0.sub_8bit
# ALL-NEXT %2:gr32 = MOVZX32rr8 %1
-# ALL-NEXT %eax = COPY %2
-# ALL-NEXT RET 0, implicit %eax
+# ALL-NEXT $eax = COPY %2
+# ALL-NEXT RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %eax
+ liveins: $eax
- %0(s16) = COPY %ax
+ %0(s16) = COPY $ax
%1(s8) = G_TRUNC %0(s16)
%2(s32) = G_ZEXT %1(s8)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -124,20 +124,20 @@
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
-# ALL %0:gr32 = COPY %eax
+# ALL %0:gr32 = COPY $eax
# ALL-NEXT %1:gr16 = COPY %0.sub_16bit
# ALL-NEXT %2:gr32 = MOVZX32rr16 %1
-# ALL-NEXT %eax = COPY %2
-# ALL-NEXT RET 0, implicit %eax
+# ALL-NEXT $eax = COPY %2
+# ALL-NEXT RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %eax
+ liveins: $eax
- %0(s32) = COPY %eax
+ %0(s32) = COPY $eax
%1(s16) = G_TRUNC %0(s32)
%2(s32) = G_ZEXT %1(s16)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -154,20 +154,20 @@
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
-# ALL %0:gr32[[ABCD]] = COPY %edx
+# ALL %0:gr32[[ABCD]] = COPY $edx
# ALL-NEXT %1:gr8 = COPY %0.sub_8bit
# ALL-NEXT %2:gr32 = MOVZX32rr8 %1
-# ALL-NEXT %eax = COPY %2
-# ALL-NEXT RET 0, implicit %eax
+# ALL-NEXT $eax = COPY %2
+# ALL-NEXT RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %eax,%edx
+ liveins: $eax,$edx
- %0(s32) = COPY %edx
+ %0(s32) = COPY $edx
%1(s8) = G_TRUNC %0(s32)
%2(s32) = G_ANYEXT %1(s8)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -184,20 +184,20 @@
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
-# ALL %0:gr32 = COPY %edx
+# ALL %0:gr32 = COPY $edx
# ALL-NEXT %1:gr16 = COPY %0.sub_16bit
# ALL-NEXT %2:gr32 = SUBREG_TO_REG 0, %1, %subreg.sub_16bit
-# ALL-NEXT %eax = COPY %2
-# ALL-NEXT RET 0, implicit %eax
+# ALL-NEXT $eax = COPY %2
+# ALL-NEXT RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %eax,%edx
+ liveins: $eax,$edx
- %0(s32) = COPY %edx
+ %0(s32) = COPY $edx
%1(s16) = G_TRUNC %0(s32)
%2(s32) = G_ANYEXT %1(s16)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir b/llvm/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir
index a52ca89..a45e102 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir
@@ -34,19 +34,19 @@
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; ALL-LABEL: name: test_zext_i1
- ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil
+ ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil
; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_8bit
- ; ALL: [[AND64ri8_:%[0-9]+]]:gr64 = AND64ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; ALL: %rax = COPY [[AND64ri8_]]
- ; ALL: RET 0, implicit %rax
- %0(s8) = COPY %dil
+ ; ALL: [[AND64ri8_:%[0-9]+]]:gr64 = AND64ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; ALL: $rax = COPY [[AND64ri8_]]
+ ; ALL: RET 0, implicit $rax
+ %0(s8) = COPY $dil
%1(s1) = G_TRUNC %0(s8)
%2(s64) = G_ZEXT %1(s1)
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
---
@@ -59,17 +59,17 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; ALL-LABEL: name: test_sext_i8
- ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil
+ ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil
; ALL: [[MOVSX64rr8_:%[0-9]+]]:gr64 = MOVSX64rr8 [[COPY]]
- ; ALL: %rax = COPY [[MOVSX64rr8_]]
- ; ALL: RET 0, implicit %rax
- %0(s8) = COPY %dil
+ ; ALL: $rax = COPY [[MOVSX64rr8_]]
+ ; ALL: RET 0, implicit $rax
+ %0(s8) = COPY $dil
%1(s64) = G_SEXT %0(s8)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -82,17 +82,17 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; ALL-LABEL: name: test_sext_i16
- ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY %di
+ ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY $di
; ALL: [[MOVSX64rr16_:%[0-9]+]]:gr64 = MOVSX64rr16 [[COPY]]
- ; ALL: %rax = COPY [[MOVSX64rr16_]]
- ; ALL: RET 0, implicit %rax
- %0(s16) = COPY %di
+ ; ALL: $rax = COPY [[MOVSX64rr16_]]
+ ; ALL: RET 0, implicit $rax
+ %0(s16) = COPY $di
%1(s64) = G_SEXT %0(s16)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -106,19 +106,19 @@
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; ALL-LABEL: name: anyext_s64_from_s1
- ; ALL: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY %rdi
+ ; ALL: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY $rdi
; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_8bit
- ; ALL: %rax = COPY [[SUBREG_TO_REG]]
- ; ALL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
+ ; ALL: $rax = COPY [[SUBREG_TO_REG]]
+ ; ALL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
%1(s1) = G_TRUNC %0(s64)
%2(s64) = G_ANYEXT %1(s1)
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
---
name: anyext_s64_from_s8
@@ -131,19 +131,19 @@
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; ALL-LABEL: name: anyext_s64_from_s8
- ; ALL: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY %rdi
+ ; ALL: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY $rdi
; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_8bit
- ; ALL: %rax = COPY [[SUBREG_TO_REG]]
- ; ALL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
+ ; ALL: $rax = COPY [[SUBREG_TO_REG]]
+ ; ALL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
%1(s8) = G_TRUNC %0(s64)
%2(s64) = G_ANYEXT %1(s8)
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
---
name: anyext_s64_from_s16
@@ -156,19 +156,19 @@
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; ALL-LABEL: name: anyext_s64_from_s16
- ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
+ ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_16bit
- ; ALL: %rax = COPY [[SUBREG_TO_REG]]
- ; ALL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
+ ; ALL: $rax = COPY [[SUBREG_TO_REG]]
+ ; ALL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
%1(s16) = G_TRUNC %0(s64)
%2(s64) = G_ANYEXT %1(s16)
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
---
name: anyext_s64_from_s32
@@ -181,17 +181,17 @@
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; ALL-LABEL: name: anyext_s64_from_s32
- ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
+ ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY [[COPY]].sub_32bit
; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_32bit
- ; ALL: %rax = COPY [[SUBREG_TO_REG]]
- ; ALL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
+ ; ALL: $rax = COPY [[SUBREG_TO_REG]]
+ ; ALL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
%1(s32) = G_TRUNC %0(s64)
%2(s64) = G_ANYEXT %1(s32)
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-ext.mir b/llvm/test/CodeGen/X86/GlobalISel/select-ext.mir
index e141548..d7fcb1d 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-ext.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-ext.mir
@@ -64,21 +64,21 @@
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
-# X32: %0:gr32_abcd = COPY %edi
-# X64: %0:gr32 = COPY %edi
+# X32: %0:gr32_abcd = COPY $edi
+# X64: %0:gr32 = COPY $edi
# ALL_NEXT: %1:gr8 = COPY %0.sub_8bit
-# ALL_NEXT: %2:gr8 = AND8ri %1, 1, implicit-def %eflags
-# ALL_NEXT: %al = COPY %2
-# ALL_NEXT: RET 0, implicit %al
+# ALL_NEXT: %2:gr8 = AND8ri %1, 1, implicit-def $eflags
+# ALL_NEXT: $al = COPY %2
+# ALL_NEXT: RET 0, implicit $al
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s8) = G_ZEXT %1(s1)
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -102,22 +102,22 @@
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
-# X32: %0:gr32_abcd = COPY %edi
-# X64: %0:gr32 = COPY %edi
+# X32: %0:gr32_abcd = COPY $edi
+# X64: %0:gr32 = COPY $edi
# ALL_NEXT: %1:gr8 = COPY %0.sub_8bit
# ALL_NEXT: %3:gr16 = SUBREG_TO_REG 0, %1, %subreg.sub_8bit
-# ALL_NEXT: %2:gr16 = AND16ri8 %3, 1, implicit-def %eflags
-# ALL_NEXT: %ax = COPY %2
-# ALL_NEXT: RET 0, implicit %ax
+# ALL_NEXT: %2:gr16 = AND16ri8 %3, 1, implicit-def $eflags
+# ALL_NEXT: $ax = COPY %2
+# ALL_NEXT: RET 0, implicit $ax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s16) = G_ZEXT %1(s1)
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -141,22 +141,22 @@
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# X32: %0:gr32_abcd = COPY %edi
-# X64: %0:gr32 = COPY %edi
+# X32: %0:gr32_abcd = COPY $edi
+# X64: %0:gr32 = COPY $edi
# ALL_NEXT: %1:gr8 = COPY %0.sub_8bit
# ALL_NEXT: %3:gr32 = SUBREG_TO_REG 0, %1, %subreg.sub_8bit
-# ALL_NEXT: %2:gr32 = AND32ri8 %3, 1, implicit-def %eflags
-# ALL_NEXT: %eax = COPY %2
-# ALL_NEXT: RET 0, implicit %eax
+# ALL_NEXT: %2:gr32 = AND32ri8 %3, 1, implicit-def $eflags
+# ALL_NEXT: $eax = COPY %2
+# ALL_NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s32) = G_ZEXT %1(s1)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -171,18 +171,18 @@
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
-# ALL: %0:gr8 = COPY %dil
+# ALL: %0:gr8 = COPY $dil
# ALL-NEXT: %1:gr32 = MOVZX32rr8 %0
-# ALL-NEXT: %eax = COPY %1
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %1
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s8) = COPY %dil
+ %0(s8) = COPY $dil
%1(s32) = G_ZEXT %0(s8)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -197,18 +197,18 @@
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
-# ALL: %0:gr16 = COPY %di
+# ALL: %0:gr16 = COPY $di
# ALL-NEXT: %1:gr32 = MOVZX32rr16 %0
-# ALL-NEXT: %eax = COPY %1
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %1
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s16) = COPY %di
+ %0(s16) = COPY $di
%1(s32) = G_ZEXT %0(s16)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -223,18 +223,18 @@
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
-# ALL: %0:gr8 = COPY %dil
+# ALL: %0:gr8 = COPY $dil
# ALL-NEXT: %1:gr32 = MOVSX32rr8 %0
-# ALL-NEXT: %eax = COPY %1
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %1
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s8) = COPY %dil
+ %0(s8) = COPY $dil
%1(s32) = G_SEXT %0(s8)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -249,18 +249,18 @@
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
-# ALL: %0:gr16 = COPY %di
+# ALL: %0:gr16 = COPY $di
# ALL-NEXT: %1:gr32 = MOVSX32rr16 %0
-# ALL-NEXT: %eax = COPY %1
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %1
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s16) = COPY %di
+ %0(s16) = COPY $di
%1(s32) = G_SEXT %0(s16)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -282,20 +282,20 @@
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# X32: %0:gr32_abcd = COPY %edi
-# X64: %0:gr32 = COPY %edi
+# X32: %0:gr32_abcd = COPY $edi
+# X64: %0:gr32 = COPY $edi
# ALL-NEXT: %1:gr8 = COPY %0.sub_8bit
-# ALL-NEXT: %al = COPY %1
-# ALL-NEXT: RET 0, implicit %al
+# ALL-NEXT: $al = COPY %1
+# ALL-NEXT: RET 0, implicit $al
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s8) = G_ANYEXT %1(s1)
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
name: test_anyext_i1toi16
@@ -316,21 +316,21 @@
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# X32: %0:gr32_abcd = COPY %edi
-# X64: %0:gr32 = COPY %edi
+# X32: %0:gr32_abcd = COPY $edi
+# X64: %0:gr32 = COPY $edi
# ALL-NEXT: %1:gr8 = COPY %0.sub_8bit
# ALL-NEXT: %2:gr16 = SUBREG_TO_REG 0, %1, %subreg.sub_8bit
-# ALL-NEXT: %ax = COPY %2
-# ALL-NEXT: RET 0, implicit %ax
+# ALL-NEXT: $ax = COPY %2
+# ALL-NEXT: RET 0, implicit $ax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s16) = G_ANYEXT %1(s1)
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
name: test_anyext_i1toi32
@@ -351,21 +351,21 @@
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# X32: %0:gr32_abcd = COPY %edi
-# X64: %0:gr32 = COPY %edi
+# X32: %0:gr32_abcd = COPY $edi
+# X64: %0:gr32 = COPY $edi
# ALL-NEXT: %1:gr8 = COPY %0.sub_8bit
# ALL-NEXT: %2:gr32 = SUBREG_TO_REG 0, %1, %subreg.sub_8bit
-# ALL-NEXT: %eax = COPY %2
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %2
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s32) = G_ANYEXT %1(s1)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
name: test_anyext_i8toi16
@@ -386,21 +386,21 @@
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# X32: %0:gr32_abcd = COPY %edi
-# X64: %0:gr32 = COPY %edi
+# X32: %0:gr32_abcd = COPY $edi
+# X64: %0:gr32 = COPY $edi
# ALL-NEXT: %1:gr8 = COPY %0.sub_8bit
# ALL-NEXT: %2:gr16 = SUBREG_TO_REG 0, %1, %subreg.sub_8bit
-# ALL-NEXT: %ax = COPY %2
-# ALL-NEXT: RET 0, implicit %ax
+# ALL-NEXT: $ax = COPY %2
+# ALL-NEXT: RET 0, implicit $ax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s8) = G_TRUNC %0(s32)
%2(s16) = G_ANYEXT %1(s8)
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
name: test_anyext_i8toi32
@@ -421,21 +421,21 @@
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# X32: %0:gr32_abcd = COPY %edi
-# X64: %0:gr32 = COPY %edi
+# X32: %0:gr32_abcd = COPY $edi
+# X64: %0:gr32 = COPY $edi
# ALL-NEXT: %1:gr8 = COPY %0.sub_8bit
# ALL-NEXT: %2:gr32 = MOVZX32rr8 %1
-# ALL-NEXT: %eax = COPY %2
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %2
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s8) = G_TRUNC %0(s32)
%2(s32) = G_ANYEXT %1(s8)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
name: test_anyext_i16toi32
@@ -451,18 +451,18 @@
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# ALL: %0:gr32 = COPY %edi
+# ALL: %0:gr32 = COPY $edi
# ALL-NEXT: %1:gr16 = COPY %0.sub_16bit
# ALL-NEXT: %2:gr32 = SUBREG_TO_REG 0, %1, %subreg.sub_16bit
-# ALL-NEXT: %eax = COPY %2
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %2
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s16) = G_TRUNC %0(s32)
%2(s32) = G_ANYEXT %1(s16)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir
index 01f43be..35fb178 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir
@@ -27,20 +27,20 @@
registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
-# AVX: %0:vr256 = COPY %ymm1
+# AVX: %0:vr256 = COPY $ymm1
# AVX-NEXT: %1:vr128 = COPY %0.sub_xmm
-# AVX512VL: %0:vr256x = COPY %ymm1
+# AVX512VL: %0:vr256x = COPY $ymm1
# AVX512VL-NEXT: %1:vr128x = COPY %0.sub_xmm
-# ALL-NEXT: %xmm0 = COPY %1
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %1
+# ALL-NEXT: RET 0, implicit $xmm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm1
+ liveins: $ymm1
- %0(<8 x s32>) = COPY %ymm1
+ %0(<8 x s32>) = COPY $ymm1
%1(<4 x s32>) = G_EXTRACT %0(<8 x s32>), 0
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -59,22 +59,22 @@
registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
-# AVX: %0:vr256 = COPY %ymm1
+# AVX: %0:vr256 = COPY $ymm1
# AVX-NEXT: %1:vr128 = VEXTRACTF128rr %0, 1
-# AVX-NEXT: %xmm0 = COPY %1
-# AVX-NEXT: RET 0, implicit %xmm0
+# AVX-NEXT: $xmm0 = COPY %1
+# AVX-NEXT: RET 0, implicit $xmm0
#
-# AVX512VL: %0:vr256x = COPY %ymm1
+# AVX512VL: %0:vr256x = COPY $ymm1
# AVX512VL-NEXT: %1:vr128x = VEXTRACTF32x4Z256rr %0, 1
-# AVX512VL-NEXT: %xmm0 = COPY %1
-# AVX512VL-NEXT: RET 0, implicit %xmm0
+# AVX512VL-NEXT: $xmm0 = COPY %1
+# AVX512VL-NEXT: RET 0, implicit $xmm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm1
+ liveins: $ymm1
- %0(<8 x s32>) = COPY %ymm1
+ %0(<8 x s32>) = COPY $ymm1
%1(<4 x s32>) = G_EXTRACT %0(<8 x s32>), 128
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir
index b17b979..64781f2 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir
@@ -32,18 +32,18 @@
registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
-# ALL: %0:vr512 = COPY %zmm1
+# ALL: %0:vr512 = COPY $zmm1
# ALL-NEXT: %1:vr128x = COPY %0.sub_xmm
-# ALL-NEXT: %xmm0 = COPY %1
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %1
+# ALL-NEXT: RET 0, implicit $xmm0
body: |
bb.1 (%ir-block.0):
- liveins: %zmm1
+ liveins: $zmm1
- %0(<16 x s32>) = COPY %zmm1
+ %0(<16 x s32>) = COPY $zmm1
%1(<4 x s32>) = G_EXTRACT %0(<16 x s32>), 0
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -58,18 +58,18 @@
registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
-# ALL: %0:vr512 = COPY %zmm1
+# ALL: %0:vr512 = COPY $zmm1
# ALL-NEXT: %1:vr128x = VEXTRACTF32x4Zrr %0, 1
-# ALL-NEXT: %xmm0 = COPY %1
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %1
+# ALL-NEXT: RET 0, implicit $xmm0
body: |
bb.1 (%ir-block.0):
- liveins: %zmm1
+ liveins: $zmm1
- %0(<16 x s32>) = COPY %zmm1
+ %0(<16 x s32>) = COPY $zmm1
%1(<4 x s32>) = G_EXTRACT %0(<16 x s32>), 128
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -84,18 +84,18 @@
registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
-# ALL: %0:vr512 = COPY %zmm1
+# ALL: %0:vr512 = COPY $zmm1
# ALL-NEXT: %1:vr256x = COPY %0.sub_ymm
-# ALL-NEXT: %ymm0 = COPY %1
-# ALL-NEXT: RET 0, implicit %ymm0
+# ALL-NEXT: $ymm0 = COPY %1
+# ALL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %zmm1
+ liveins: $zmm1
- %0(<16 x s32>) = COPY %zmm1
+ %0(<16 x s32>) = COPY $zmm1
%1(<8 x s32>) = G_EXTRACT %0(<16 x s32>), 0
- %ymm0 = COPY %1(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -110,17 +110,17 @@
registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
-# ALL: %0:vr512 = COPY %zmm1
+# ALL: %0:vr512 = COPY $zmm1
# ALL-NEXT: %1:vr256x = VEXTRACTF64x4Zrr %0, 1
-# ALL-NEXT: %ymm0 = COPY %1
-# ALL-NEXT: RET 0, implicit %ymm0
+# ALL-NEXT: $ymm0 = COPY %1
+# ALL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %zmm1
+ liveins: $zmm1
- %0(<16 x s32>) = COPY %zmm1
+ %0(<16 x s32>) = COPY $zmm1
%1(<8 x s32>) = G_EXTRACT %0(<16 x s32>), 256
- %ymm0 = COPY %1(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir
index 4dd2d5d..9c8ab6f 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir
@@ -34,37 +34,37 @@
#
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fadd_float
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1
+ ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
; SSE: [[ADDSSrr:%[0-9]+]]:fr32 = ADDSSrr [[COPY]], [[COPY1]]
- ; SSE: %xmm0 = COPY [[ADDSSrr]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: $xmm0 = COPY [[ADDSSrr]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fadd_float
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1
+ ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
; AVX: [[VADDSSrr:%[0-9]+]]:fr32 = VADDSSrr [[COPY]], [[COPY1]]
- ; AVX: %xmm0 = COPY [[VADDSSrr]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: $xmm0 = COPY [[VADDSSrr]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fadd_float
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1
+ ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
; AVX512F: [[VADDSSZrr:%[0-9]+]]:fr32x = VADDSSZrr [[COPY]], [[COPY1]]
- ; AVX512F: %xmm0 = COPY [[VADDSSZrr]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: $xmm0 = COPY [[VADDSSZrr]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fadd_float
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
; AVX512VL: [[VADDSSZrr:%[0-9]+]]:fr32x = VADDSSZrr [[COPY]], [[COPY1]]
- ; AVX512VL: %xmm0 = COPY [[VADDSSZrr]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; AVX512VL: $xmm0 = COPY [[VADDSSZrr]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FADD %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -85,36 +85,36 @@
#
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fadd_double
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1
+ ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
; SSE: [[ADDSDrr:%[0-9]+]]:fr64 = ADDSDrr [[COPY]], [[COPY1]]
- ; SSE: %xmm0 = COPY [[ADDSDrr]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: $xmm0 = COPY [[ADDSDrr]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fadd_double
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1
+ ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
; AVX: [[VADDSDrr:%[0-9]+]]:fr64 = VADDSDrr [[COPY]], [[COPY1]]
- ; AVX: %xmm0 = COPY [[VADDSDrr]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: $xmm0 = COPY [[VADDSDrr]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fadd_double
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1
+ ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
; AVX512F: [[VADDSDZrr:%[0-9]+]]:fr64x = VADDSDZrr [[COPY]], [[COPY1]]
- ; AVX512F: %xmm0 = COPY [[VADDSDZrr]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: $xmm0 = COPY [[VADDSDZrr]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fadd_double
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
; AVX512VL: [[VADDSDZrr:%[0-9]+]]:fr64x = VADDSDZrr [[COPY]], [[COPY1]]
- ; AVX512VL: %xmm0 = COPY [[VADDSDZrr]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; AVX512VL: $xmm0 = COPY [[VADDSDZrr]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FADD %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-fconstant.mir b/llvm/test/CodeGen/X86/GlobalISel/select-fconstant.mir
index 07cf84d..17a3caa 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-fconstant.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-fconstant.mir
@@ -29,29 +29,29 @@
body: |
bb.1.entry:
; CHECK_NOPIC64-LABEL: name: test_float
- ; CHECK_NOPIC64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm %rip, 1, %noreg, %const.0, %noreg
- ; CHECK_NOPIC64: %xmm0 = COPY [[MOVSSrm]]
- ; CHECK_NOPIC64: RET 0, implicit %xmm0
+ ; CHECK_NOPIC64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm $rip, 1, $noreg, %const.0, $noreg
+ ; CHECK_NOPIC64: $xmm0 = COPY [[MOVSSrm]]
+ ; CHECK_NOPIC64: RET 0, implicit $xmm0
; CHECK_LARGE64-LABEL: name: test_float
; CHECK_LARGE64: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri %const.0
- ; CHECK_LARGE64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm [[MOV64ri]], 1, %noreg, 0, %noreg :: (load 8 from constant-pool, align 32)
- ; CHECK_LARGE64: %xmm0 = COPY [[MOVSSrm]]
- ; CHECK_LARGE64: RET 0, implicit %xmm0
+ ; CHECK_LARGE64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm [[MOV64ri]], 1, $noreg, 0, $noreg :: (load 8 from constant-pool, align 32)
+ ; CHECK_LARGE64: $xmm0 = COPY [[MOVSSrm]]
+ ; CHECK_LARGE64: RET 0, implicit $xmm0
; CHECK_SMALL32-LABEL: name: test_float
- ; CHECK_SMALL32: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm %noreg, 1, %noreg, %const.0, %noreg
- ; CHECK_SMALL32: %xmm0 = COPY [[MOVSSrm]]
- ; CHECK_SMALL32: RET 0, implicit %xmm0
+ ; CHECK_SMALL32: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm $noreg, 1, $noreg, %const.0, $noreg
+ ; CHECK_SMALL32: $xmm0 = COPY [[MOVSSrm]]
+ ; CHECK_SMALL32: RET 0, implicit $xmm0
; CHECK_LARGE32-LABEL: name: test_float
- ; CHECK_LARGE32: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm %noreg, 1, %noreg, %const.0, %noreg
- ; CHECK_LARGE32: %xmm0 = COPY [[MOVSSrm]]
- ; CHECK_LARGE32: RET 0, implicit %xmm0
+ ; CHECK_LARGE32: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm $noreg, 1, $noreg, %const.0, $noreg
+ ; CHECK_LARGE32: $xmm0 = COPY [[MOVSSrm]]
+ ; CHECK_LARGE32: RET 0, implicit $xmm0
; CHECK_PIC64-LABEL: name: test_float
- ; CHECK_PIC64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm %rip, 1, %noreg, %const.0, %noreg
- ; CHECK_PIC64: %xmm0 = COPY [[MOVSSrm]]
- ; CHECK_PIC64: RET 0, implicit %xmm0
+ ; CHECK_PIC64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm $rip, 1, $noreg, %const.0, $noreg
+ ; CHECK_PIC64: $xmm0 = COPY [[MOVSSrm]]
+ ; CHECK_PIC64: RET 0, implicit $xmm0
%0(s32) = G_FCONSTANT float 5.500000e+00
- %xmm0 = COPY %0(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %0(s32)
+ RET 0, implicit $xmm0
...
---
@@ -70,28 +70,28 @@
body: |
bb.1.entry:
; CHECK_NOPIC64-LABEL: name: test_double
- ; CHECK_NOPIC64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm %rip, 1, %noreg, %const.0, %noreg
- ; CHECK_NOPIC64: %xmm0 = COPY [[MOVSDrm]]
- ; CHECK_NOPIC64: RET 0, implicit %xmm0
+ ; CHECK_NOPIC64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm $rip, 1, $noreg, %const.0, $noreg
+ ; CHECK_NOPIC64: $xmm0 = COPY [[MOVSDrm]]
+ ; CHECK_NOPIC64: RET 0, implicit $xmm0
; CHECK_LARGE64-LABEL: name: test_double
; CHECK_LARGE64: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri %const.0
- ; CHECK_LARGE64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm [[MOV64ri]], 1, %noreg, 0, %noreg :: (load 8 from constant-pool, align 64)
- ; CHECK_LARGE64: %xmm0 = COPY [[MOVSDrm]]
- ; CHECK_LARGE64: RET 0, implicit %xmm0
+ ; CHECK_LARGE64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm [[MOV64ri]], 1, $noreg, 0, $noreg :: (load 8 from constant-pool, align 64)
+ ; CHECK_LARGE64: $xmm0 = COPY [[MOVSDrm]]
+ ; CHECK_LARGE64: RET 0, implicit $xmm0
; CHECK_SMALL32-LABEL: name: test_double
- ; CHECK_SMALL32: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm %noreg, 1, %noreg, %const.0, %noreg
- ; CHECK_SMALL32: %xmm0 = COPY [[MOVSDrm]]
- ; CHECK_SMALL32: RET 0, implicit %xmm0
+ ; CHECK_SMALL32: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm $noreg, 1, $noreg, %const.0, $noreg
+ ; CHECK_SMALL32: $xmm0 = COPY [[MOVSDrm]]
+ ; CHECK_SMALL32: RET 0, implicit $xmm0
; CHECK_LARGE32-LABEL: name: test_double
- ; CHECK_LARGE32: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm %noreg, 1, %noreg, %const.0, %noreg
- ; CHECK_LARGE32: %xmm0 = COPY [[MOVSDrm]]
- ; CHECK_LARGE32: RET 0, implicit %xmm0
+ ; CHECK_LARGE32: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm $noreg, 1, $noreg, %const.0, $noreg
+ ; CHECK_LARGE32: $xmm0 = COPY [[MOVSDrm]]
+ ; CHECK_LARGE32: RET 0, implicit $xmm0
; CHECK_PIC64-LABEL: name: test_double
- ; CHECK_PIC64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm %rip, 1, %noreg, %const.0, %noreg
- ; CHECK_PIC64: %xmm0 = COPY [[MOVSDrm]]
- ; CHECK_PIC64: RET 0, implicit %xmm0
+ ; CHECK_PIC64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm $rip, 1, $noreg, %const.0, $noreg
+ ; CHECK_PIC64: $xmm0 = COPY [[MOVSDrm]]
+ ; CHECK_PIC64: RET 0, implicit $xmm0
%0(s64) = G_FCONSTANT double 5.500000e+00
- %xmm0 = COPY %0(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %0(s64)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir
index 9d82222..216926f 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir
@@ -34,37 +34,37 @@
#
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fdiv_float
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1
+ ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
; SSE: [[DIVSSrr:%[0-9]+]]:fr32 = DIVSSrr [[COPY]], [[COPY1]]
- ; SSE: %xmm0 = COPY [[DIVSSrr]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: $xmm0 = COPY [[DIVSSrr]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fdiv_float
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1
+ ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
; AVX: [[VDIVSSrr:%[0-9]+]]:fr32 = VDIVSSrr [[COPY]], [[COPY1]]
- ; AVX: %xmm0 = COPY [[VDIVSSrr]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: $xmm0 = COPY [[VDIVSSrr]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fdiv_float
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1
+ ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
; AVX512F: [[VDIVSSZrr:%[0-9]+]]:fr32x = VDIVSSZrr [[COPY]], [[COPY1]]
- ; AVX512F: %xmm0 = COPY [[VDIVSSZrr]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: $xmm0 = COPY [[VDIVSSZrr]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fdiv_float
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
; AVX512VL: [[VDIVSSZrr:%[0-9]+]]:fr32x = VDIVSSZrr [[COPY]], [[COPY1]]
- ; AVX512VL: %xmm0 = COPY [[VDIVSSZrr]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; AVX512VL: $xmm0 = COPY [[VDIVSSZrr]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FDIV %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -85,36 +85,36 @@
#
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fdiv_double
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1
+ ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
; SSE: [[DIVSDrr:%[0-9]+]]:fr64 = DIVSDrr [[COPY]], [[COPY1]]
- ; SSE: %xmm0 = COPY [[DIVSDrr]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: $xmm0 = COPY [[DIVSDrr]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fdiv_double
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1
+ ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
; AVX: [[VDIVSDrr:%[0-9]+]]:fr64 = VDIVSDrr [[COPY]], [[COPY1]]
- ; AVX: %xmm0 = COPY [[VDIVSDrr]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: $xmm0 = COPY [[VDIVSDrr]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fdiv_double
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1
+ ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
; AVX512F: [[VDIVSDZrr:%[0-9]+]]:fr64x = VDIVSDZrr [[COPY]], [[COPY1]]
- ; AVX512F: %xmm0 = COPY [[VDIVSDZrr]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: $xmm0 = COPY [[VDIVSDZrr]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fdiv_double
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
; AVX512VL: [[VDIVSDZrr:%[0-9]+]]:fr64x = VDIVSDZrr [[COPY]], [[COPY1]]
- ; AVX512VL: %xmm0 = COPY [[VDIVSDZrr]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; AVX512VL: $xmm0 = COPY [[VDIVSDZrr]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FDIV %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir
index f2485b5..aaedac8 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir
@@ -34,37 +34,37 @@
#
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fmul_float
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1
+ ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
; SSE: [[MULSSrr:%[0-9]+]]:fr32 = MULSSrr [[COPY]], [[COPY1]]
- ; SSE: %xmm0 = COPY [[MULSSrr]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: $xmm0 = COPY [[MULSSrr]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fmul_float
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1
+ ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
; AVX: [[VMULSSrr:%[0-9]+]]:fr32 = VMULSSrr [[COPY]], [[COPY1]]
- ; AVX: %xmm0 = COPY [[VMULSSrr]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: $xmm0 = COPY [[VMULSSrr]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fmul_float
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1
+ ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
; AVX512F: [[VMULSSZrr:%[0-9]+]]:fr32x = VMULSSZrr [[COPY]], [[COPY1]]
- ; AVX512F: %xmm0 = COPY [[VMULSSZrr]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: $xmm0 = COPY [[VMULSSZrr]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fmul_float
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
; AVX512VL: [[VMULSSZrr:%[0-9]+]]:fr32x = VMULSSZrr [[COPY]], [[COPY1]]
- ; AVX512VL: %xmm0 = COPY [[VMULSSZrr]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; AVX512VL: $xmm0 = COPY [[VMULSSZrr]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FMUL %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -85,36 +85,36 @@
#
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fmul_double
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1
+ ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
; SSE: [[MULSDrr:%[0-9]+]]:fr64 = MULSDrr [[COPY]], [[COPY1]]
- ; SSE: %xmm0 = COPY [[MULSDrr]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: $xmm0 = COPY [[MULSDrr]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fmul_double
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1
+ ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
; AVX: [[VMULSDrr:%[0-9]+]]:fr64 = VMULSDrr [[COPY]], [[COPY1]]
- ; AVX: %xmm0 = COPY [[VMULSDrr]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: $xmm0 = COPY [[VMULSDrr]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fmul_double
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1
+ ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
; AVX512F: [[VMULSDZrr:%[0-9]+]]:fr64x = VMULSDZrr [[COPY]], [[COPY1]]
- ; AVX512F: %xmm0 = COPY [[VMULSDZrr]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: $xmm0 = COPY [[VMULSDZrr]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fmul_double
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
; AVX512VL: [[VMULSDZrr:%[0-9]+]]:fr64x = VMULSDZrr [[COPY]], [[COPY1]]
- ; AVX512VL: %xmm0 = COPY [[VMULSDZrr]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; AVX512VL: $xmm0 = COPY [[VMULSDZrr]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FMUL %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir
index 00dfa6a..55b8721 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir
@@ -23,16 +23,16 @@
constants:
body: |
bb.1.entry:
- liveins: %xmm0
+ liveins: $xmm0
; ALL-LABEL: name: test
- ; ALL: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
+ ; ALL: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
; ALL: [[CVTSS2SDrr:%[0-9]+]]:fr64 = CVTSS2SDrr [[COPY]]
- ; ALL: %xmm0 = COPY [[CVTSS2SDrr]]
- ; ALL: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
+ ; ALL: $xmm0 = COPY [[CVTSS2SDrr]]
+ ; ALL: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
%1(s64) = G_FPEXT %0(s32)
- %xmm0 = COPY %1(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s64)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir
index 91d8b8e..cde8a9c 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir
@@ -34,37 +34,37 @@
#
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fsub_float
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1
+ ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
; SSE: [[SUBSSrr:%[0-9]+]]:fr32 = SUBSSrr [[COPY]], [[COPY1]]
- ; SSE: %xmm0 = COPY [[SUBSSrr]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: $xmm0 = COPY [[SUBSSrr]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fsub_float
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1
+ ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
; AVX: [[VSUBSSrr:%[0-9]+]]:fr32 = VSUBSSrr [[COPY]], [[COPY1]]
- ; AVX: %xmm0 = COPY [[VSUBSSrr]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: $xmm0 = COPY [[VSUBSSrr]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fsub_float
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1
+ ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
; AVX512F: [[VSUBSSZrr:%[0-9]+]]:fr32x = VSUBSSZrr [[COPY]], [[COPY1]]
- ; AVX512F: %xmm0 = COPY [[VSUBSSZrr]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: $xmm0 = COPY [[VSUBSSZrr]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fsub_float
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
; AVX512VL: [[VSUBSSZrr:%[0-9]+]]:fr32x = VSUBSSZrr [[COPY]], [[COPY1]]
- ; AVX512VL: %xmm0 = COPY [[VSUBSSZrr]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; AVX512VL: $xmm0 = COPY [[VSUBSSZrr]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FSUB %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -85,36 +85,36 @@
#
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fsub_double
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1
+ ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
; SSE: [[SUBSDrr:%[0-9]+]]:fr64 = SUBSDrr [[COPY]], [[COPY1]]
- ; SSE: %xmm0 = COPY [[SUBSDrr]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: $xmm0 = COPY [[SUBSDrr]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fsub_double
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1
+ ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
; AVX: [[VSUBSDrr:%[0-9]+]]:fr64 = VSUBSDrr [[COPY]], [[COPY1]]
- ; AVX: %xmm0 = COPY [[VSUBSDrr]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: $xmm0 = COPY [[VSUBSDrr]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fsub_double
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1
+ ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
; AVX512F: [[VSUBSDZrr:%[0-9]+]]:fr64x = VSUBSDZrr [[COPY]], [[COPY1]]
- ; AVX512F: %xmm0 = COPY [[VSUBSDZrr]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: $xmm0 = COPY [[VSUBSDZrr]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fsub_double
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
; AVX512VL: [[VSUBSDZrr:%[0-9]+]]:fr64x = VSUBSDZrr [[COPY]], [[COPY1]]
- ; AVX512VL: %xmm0 = COPY [[VSUBSDZrr]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; AVX512VL: $xmm0 = COPY [[VSUBSDZrr]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FSUB %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-gep.mir b/llvm/test/CodeGen/X86/GlobalISel/select-gep.mir
index b78afd2..cd60960 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-gep.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-gep.mir
@@ -19,18 +19,18 @@
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; CHECK-LABEL: name: test_gep_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; CHECK: [[MOV64ri32_:%[0-9]+]]:gr64_nosp = MOV64ri32 20
- ; CHECK: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri32_]], 0, %noreg
- ; CHECK: %rax = COPY [[LEA64r]]
- ; CHECK: RET 0, implicit %rax
- %0(p0) = COPY %rdi
+ ; CHECK: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri32_]], 0, $noreg
+ ; CHECK: $rax = COPY [[LEA64r]]
+ ; CHECK: RET 0, implicit $rax
+ %0(p0) = COPY $rdi
%1(s64) = G_CONSTANT i64 20
%2(p0) = G_GEP %0, %1(s64)
- %rax = COPY %2(p0)
- RET 0, implicit %rax
+ $rax = COPY %2(p0)
+ RET 0, implicit $rax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-inc.mir b/llvm/test/CodeGen/X86/GlobalISel/select-inc.mir
index b2cfa47..f336b08 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-inc.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-inc.mir
@@ -21,16 +21,16 @@
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# ALL: %0:gr8 = COPY %al
+# ALL: %0:gr8 = COPY $al
# INC-NEXT: %2:gr8 = INC8r %0
# ADD-NEXT: %2:gr8 = ADD8ri %0, 1
body: |
bb.1 (%ir-block.0):
- liveins: %al
+ liveins: $al
- %0(s8) = COPY %al
+ %0(s8) = COPY $al
%1(s8) = G_CONSTANT i8 1
%2(s8) = G_ADD %0, %1
- %al = COPY %2(s8)
+ $al = COPY %2(s8)
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir
index 744dfd6..f0d432a 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir
@@ -28,26 +28,26 @@
- { id: 0, class: vecr }
- { id: 1, class: vecr }
- { id: 2, class: vecr }
-# AVX: %0:vr256 = COPY %ymm0
-# AVX-NEXT: %1:vr128 = COPY %xmm1
+# AVX: %0:vr256 = COPY $ymm0
+# AVX-NEXT: %1:vr128 = COPY $xmm1
# AVX-NEXT: %2:vr256 = VINSERTF128rr %0, %1, 0
-# AVX-NEXT: %ymm0 = COPY %2
-# AVX-NEXT: RET 0, implicit %ymm0
+# AVX-NEXT: $ymm0 = COPY %2
+# AVX-NEXT: RET 0, implicit $ymm0
#
-# AVX512VL: %0:vr256x = COPY %ymm0
-# AVX512VL-NEXT: %1:vr128x = COPY %xmm1
+# AVX512VL: %0:vr256x = COPY $ymm0
+# AVX512VL-NEXT: %1:vr128x = COPY $xmm1
# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rr %0, %1, 0
-# AVX512VL-NEXT: %ymm0 = COPY %2
-# AVX512VL-NEXT: RET 0, implicit %ymm0
+# AVX512VL-NEXT: $ymm0 = COPY %2
+# AVX512VL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<8 x s32>) = COPY %ymm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<8 x s32>) = COPY $ymm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 0
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -60,24 +60,24 @@
- { id: 0, class: vecr }
- { id: 1, class: vecr }
- { id: 2, class: vecr }
-# AVX: %1:vr128 = COPY %xmm1
+# AVX: %1:vr128 = COPY $xmm1
# AVX-NEXT: undef %2.sub_xmm:vr256 = COPY %1
-# AVX-NEXT: %ymm0 = COPY %2
-# AVX-NEXT: RET 0, implicit %ymm0
+# AVX-NEXT: $ymm0 = COPY %2
+# AVX-NEXT: RET 0, implicit $ymm0
#
-# AVX512VL: %1:vr128x = COPY %xmm1
+# AVX512VL: %1:vr128x = COPY $xmm1
# AVX512VL-NEXT: undef %2.sub_xmm:vr256x = COPY %1
-# AVX512VL-NEXT: %ymm0 = COPY %2
-# AVX512VL-NEXT: RET 0, implicit %ymm0
+# AVX512VL-NEXT: $ymm0 = COPY %2
+# AVX512VL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
%0(<8 x s32>) = IMPLICIT_DEF
- %1(<4 x s32>) = COPY %xmm1
+ %1(<4 x s32>) = COPY $xmm1
%2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 0
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -90,26 +90,26 @@
- { id: 0, class: vecr }
- { id: 1, class: vecr }
- { id: 2, class: vecr }
-# AVX: %0:vr256 = COPY %ymm0
-# AVX-NEXT: %1:vr128 = COPY %xmm1
+# AVX: %0:vr256 = COPY $ymm0
+# AVX-NEXT: %1:vr128 = COPY $xmm1
# AVX-NEXT: %2:vr256 = VINSERTF128rr %0, %1, 1
-# AVX-NEXT: %ymm0 = COPY %2
-# AVX-NEXT: RET 0, implicit %ymm0
+# AVX-NEXT: $ymm0 = COPY %2
+# AVX-NEXT: RET 0, implicit $ymm0
#
-# AVX512VL: %0:vr256x = COPY %ymm0
-# AVX512VL-NEXT: %1:vr128x = COPY %xmm1
+# AVX512VL: %0:vr256x = COPY $ymm0
+# AVX512VL-NEXT: %1:vr128x = COPY $xmm1
# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rr %0, %1, 1
-# AVX512VL-NEXT: %ymm0 = COPY %2
-# AVX512VL-NEXT: RET 0, implicit %ymm0
+# AVX512VL-NEXT: $ymm0 = COPY %2
+# AVX512VL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<8 x s32>) = COPY %ymm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<8 x s32>) = COPY $ymm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 128
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
name: test_insert_128_idx1_undef
@@ -122,23 +122,23 @@
- { id: 1, class: vecr }
- { id: 2, class: vecr }
# AVX: %0:vr256 = IMPLICIT_DEF
-# AVX-NEXT: %1:vr128 = COPY %xmm1
+# AVX-NEXT: %1:vr128 = COPY $xmm1
# AVX-NEXT: %2:vr256 = VINSERTF128rr %0, %1, 1
-# AVX-NEXT: %ymm0 = COPY %2
-# AVX-NEXT: RET 0, implicit %ymm0
+# AVX-NEXT: $ymm0 = COPY %2
+# AVX-NEXT: RET 0, implicit $ymm0
#
# AVX512VL: %0:vr256x = IMPLICIT_DEF
-# AVX512VL-NEXT: %1:vr128x = COPY %xmm1
+# AVX512VL-NEXT: %1:vr128x = COPY $xmm1
# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rr %0, %1, 1
-# AVX512VL-NEXT: %ymm0 = COPY %2
-# AVX512VL-NEXT: RET 0, implicit %ymm0
+# AVX512VL-NEXT: $ymm0 = COPY %2
+# AVX512VL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
%0(<8 x s32>) = IMPLICIT_DEF
- %1(<4 x s32>) = COPY %xmm1
+ %1(<4 x s32>) = COPY $xmm1
%2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 128
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir
index 45ed728..6b81d94 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir
@@ -46,19 +46,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %ymm1
+ liveins: $zmm0, $ymm1
; ALL-LABEL: name: test_insert_128_idx0
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY %xmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[COPY]], [[COPY1]], 0
- ; ALL: %zmm0 = COPY [[VINSERTF32x4Zrr]]
- ; ALL: RET 0, implicit %ymm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<4 x s32>) = COPY %xmm1
+ ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]]
+ ; ALL: RET 0, implicit $ymm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 0
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -72,18 +72,18 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_insert_128_idx0_undef
- ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY %xmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1
; ALL: undef %2.sub_xmm:vr512 = COPY [[COPY]]
- ; ALL: %zmm0 = COPY %2
- ; ALL: RET 0, implicit %ymm0
+ ; ALL: $zmm0 = COPY %2
+ ; ALL: RET 0, implicit $ymm0
%0(<16 x s32>) = IMPLICIT_DEF
- %1(<4 x s32>) = COPY %xmm1
+ %1(<4 x s32>) = COPY $xmm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 0
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -97,19 +97,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_insert_128_idx1
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY %xmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[COPY]], [[COPY1]], 1
- ; ALL: %zmm0 = COPY [[VINSERTF32x4Zrr]]
- ; ALL: RET 0, implicit %ymm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<4 x s32>) = COPY %xmm1
+ ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]]
+ ; ALL: RET 0, implicit $ymm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 128
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
---
name: test_insert_128_idx1_undef
@@ -122,19 +122,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_insert_128_idx1_undef
; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
- ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY %xmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1
; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[DEF]], [[COPY]], 1
- ; ALL: %zmm0 = COPY [[VINSERTF32x4Zrr]]
- ; ALL: RET 0, implicit %ymm0
+ ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]]
+ ; ALL: RET 0, implicit $ymm0
%0(<16 x s32>) = IMPLICIT_DEF
- %1(<4 x s32>) = COPY %xmm1
+ %1(<4 x s32>) = COPY $xmm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 128
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
---
name: test_insert_256_idx0
@@ -147,19 +147,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %ymm1
+ liveins: $zmm0, $ymm1
; ALL-LABEL: name: test_insert_256_idx0
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY %ymm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[COPY]], [[COPY1]], 0
- ; ALL: %zmm0 = COPY [[VINSERTF64x4Zrr]]
- ; ALL: RET 0, implicit %ymm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<8 x s32>) = COPY %ymm1
+ ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]]
+ ; ALL: RET 0, implicit $ymm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<8 x s32>) = COPY $ymm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 0
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -173,18 +173,18 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_insert_256_idx0_undef
- ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY %ymm1
+ ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY $ymm1
; ALL: undef %2.sub_ymm:vr512 = COPY [[COPY]]
- ; ALL: %zmm0 = COPY %2
- ; ALL: RET 0, implicit %ymm0
+ ; ALL: $zmm0 = COPY %2
+ ; ALL: RET 0, implicit $ymm0
%0(<16 x s32>) = IMPLICIT_DEF
- %1(<8 x s32>) = COPY %ymm1
+ %1(<8 x s32>) = COPY $ymm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 0
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -198,19 +198,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_insert_256_idx1
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY %ymm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[COPY]], [[COPY1]], 1
- ; ALL: %zmm0 = COPY [[VINSERTF64x4Zrr]]
- ; ALL: RET 0, implicit %ymm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<8 x s32>) = COPY %ymm1
+ ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]]
+ ; ALL: RET 0, implicit $ymm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<8 x s32>) = COPY $ymm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 256
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
---
name: test_insert_256_idx1_undef
@@ -223,17 +223,17 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_insert_256_idx1_undef
; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
- ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY %ymm1
+ ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY $ymm1
; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[DEF]], [[COPY]], 1
- ; ALL: %zmm0 = COPY [[VINSERTF64x4Zrr]]
- ; ALL: RET 0, implicit %ymm0
+ ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]]
+ ; ALL: RET 0, implicit $ymm0
%0(<16 x s32>) = IMPLICIT_DEF
- %1(<8 x s32>) = COPY %ymm1
+ %1(<8 x s32>) = COPY $ymm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 256
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-intrinsic-x86-flags-read-u32.mir b/llvm/test/CodeGen/X86/GlobalISel/select-intrinsic-x86-flags-read-u32.mir
index f36cecc..24af0eb 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-intrinsic-x86-flags-read-u32.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-intrinsic-x86-flags-read-u32.mir
@@ -9,8 +9,8 @@
define void @read_flags() { ret void }
; CHECK-LABEL: name: read_flags
; CHECK: bb.0:
- ; CHECK: [[RDFLAGS32_:%[0-9]+]]:gr32 = RDFLAGS32 implicit-def %esp, implicit %esp
- ; CHECK: %eax = COPY [[RDFLAGS32_]]
+ ; CHECK: [[RDFLAGS32_:%[0-9]+]]:gr32 = RDFLAGS32 implicit-def $esp, implicit $esp
+ ; CHECK: $eax = COPY [[RDFLAGS32_]]
...
---
@@ -24,5 +24,5 @@
body: |
bb.0:
%0(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.x86.flags.read.u32)
- %eax = COPY %0(s32)
+ $eax = COPY %0(s32)
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-leaf-constant.mir b/llvm/test/CodeGen/X86/GlobalISel/select-leaf-constant.mir
index 897f9a56..c0ff634 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-leaf-constant.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-leaf-constant.mir
@@ -34,11 +34,11 @@
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i32_1
; CHECK: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 1
- ; CHECK: %eax = COPY [[MOV32ri]]
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: $eax = COPY [[MOV32ri]]
+ ; CHECK: RET 0, implicit $eax
%0(s32) = G_CONSTANT i32 1
- %eax = COPY %0(s32)
- RET 0, implicit %eax
+ $eax = COPY %0(s32)
+ RET 0, implicit $eax
...
---
name: const_i32_1_optsize
@@ -50,12 +50,12 @@
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i32_1_optsize
- ; CHECK: [[MOV32r1_:%[0-9]+]]:gr32 = MOV32r1 implicit-def %eflags
- ; CHECK: %eax = COPY [[MOV32r1_]]
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: [[MOV32r1_:%[0-9]+]]:gr32 = MOV32r1 implicit-def $eflags
+ ; CHECK: $eax = COPY [[MOV32r1_]]
+ ; CHECK: RET 0, implicit $eax
%0(s32) = G_CONSTANT i32 1
- %eax = COPY %0(s32)
- RET 0, implicit %eax
+ $eax = COPY %0(s32)
+ RET 0, implicit $eax
...
---
name: const_i32_1b
@@ -68,11 +68,11 @@
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i32_1b
; CHECK: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 1
- ; CHECK: %eax = COPY [[MOV32ri]]
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: $eax = COPY [[MOV32ri]]
+ ; CHECK: RET 0, implicit $eax
%0(s32) = G_CONSTANT i32 1
- %eax = COPY %0(s32)
- RET 0, implicit %eax
+ $eax = COPY %0(s32)
+ RET 0, implicit $eax
...
---
name: const_i32_1_optsizeb
@@ -84,10 +84,10 @@
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i32_1_optsizeb
- ; CHECK: [[MOV32r1_:%[0-9]+]]:gr32 = MOV32r1 implicit-def %eflags
- ; CHECK: %eax = COPY [[MOV32r1_]]
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: [[MOV32r1_:%[0-9]+]]:gr32 = MOV32r1 implicit-def $eflags
+ ; CHECK: $eax = COPY [[MOV32r1_]]
+ ; CHECK: RET 0, implicit $eax
%0(s32) = G_CONSTANT i32 1
- %eax = COPY %0(s32)
- RET 0, implicit %eax
+ $eax = COPY %0(s32)
+ RET 0, implicit $eax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
index 804d7bc..d9ee101 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
@@ -57,15 +57,15 @@
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_i8
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[MOV32rm]], 1, %noreg, 0, %noreg :: (load 1 from %ir.p1)
- ; ALL: %al = COPY [[MOV8rm]]
- ; ALL: RET 0, implicit %al
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
+ ; ALL: $al = COPY [[MOV8rm]]
+ ; ALL: RET 0, implicit $al
%1(p0) = G_FRAME_INDEX %fixed-stack.0
%0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
%2(s8) = G_LOAD %0(p0) :: (load 1 from %ir.p1)
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -82,15 +82,15 @@
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_i16
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[MOV32rm]], 1, %noreg, 0, %noreg :: (load 2 from %ir.p1)
- ; ALL: %ax = COPY [[MOV16rm]]
- ; ALL: RET 0, implicit %ax
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
+ ; ALL: $ax = COPY [[MOV16rm]]
+ ; ALL: RET 0, implicit $ax
%1(p0) = G_FRAME_INDEX %fixed-stack.0
%0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
%2(s16) = G_LOAD %0(p0) :: (load 2 from %ir.p1)
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -107,15 +107,15 @@
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_i32
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; ALL: %eax = COPY [[MOV32rm1]]
- ; ALL: RET 0, implicit %eax
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; ALL: $eax = COPY [[MOV32rm1]]
+ ; ALL: RET 0, implicit $eax
%1(p0) = G_FRAME_INDEX %fixed-stack.0
%0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
%2(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -134,18 +134,18 @@
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_i8
- ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 1 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.1, align 0)
- ; ALL: MOV8mr [[MOV32rm]], 1, %noreg, 0, %noreg, [[MOV8rm]] :: (store 1 into %ir.p1)
- ; ALL: %eax = COPY [[MOV32rm]]
- ; ALL: RET 0, implicit %eax
+ ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: MOV8mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV8rm]] :: (store 1 into %ir.p1)
+ ; ALL: $eax = COPY [[MOV32rm]]
+ ; ALL: RET 0, implicit $eax
%2(p0) = G_FRAME_INDEX %fixed-stack.1
%0(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
%3(p0) = G_FRAME_INDEX %fixed-stack.0
%1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
G_STORE %0(s8), %1(p0) :: (store 1 into %ir.p1)
- %eax = COPY %1(p0)
- RET 0, implicit %eax
+ $eax = COPY %1(p0)
+ RET 0, implicit $eax
...
---
@@ -164,18 +164,18 @@
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_i16
- ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 2 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.1, align 0)
- ; ALL: MOV16mr [[MOV32rm]], 1, %noreg, 0, %noreg, [[MOV16rm]] :: (store 2 into %ir.p1)
- ; ALL: %eax = COPY [[MOV32rm]]
- ; ALL: RET 0, implicit %eax
+ ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: MOV16mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV16rm]] :: (store 2 into %ir.p1)
+ ; ALL: $eax = COPY [[MOV32rm]]
+ ; ALL: RET 0, implicit $eax
%2(p0) = G_FRAME_INDEX %fixed-stack.1
%0(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
%3(p0) = G_FRAME_INDEX %fixed-stack.0
%1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
G_STORE %0(s16), %1(p0) :: (store 2 into %ir.p1)
- %eax = COPY %1(p0)
- RET 0, implicit %eax
+ $eax = COPY %1(p0)
+ RET 0, implicit $eax
...
---
@@ -194,18 +194,18 @@
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_i32
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.1, align 0)
- ; ALL: MOV32mr [[MOV32rm1]], 1, %noreg, 0, %noreg, [[MOV32rm]] :: (store 4 into %ir.p1)
- ; ALL: %eax = COPY [[MOV32rm1]]
- ; ALL: RET 0, implicit %eax
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: MOV32mr [[MOV32rm1]], 1, $noreg, 0, $noreg, [[MOV32rm]] :: (store 4 into %ir.p1)
+ ; ALL: $eax = COPY [[MOV32rm1]]
+ ; ALL: RET 0, implicit $eax
%2(p0) = G_FRAME_INDEX %fixed-stack.1
%0(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
%3(p0) = G_FRAME_INDEX %fixed-stack.0
%1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
- %eax = COPY %1(p0)
- RET 0, implicit %eax
+ $eax = COPY %1(p0)
+ RET 0, implicit $eax
...
---
@@ -222,15 +222,15 @@
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_ptr
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr1)
- ; ALL: %eax = COPY [[MOV32rm1]]
- ; ALL: RET 0, implicit %eax
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr1)
+ ; ALL: $eax = COPY [[MOV32rm1]]
+ ; ALL: RET 0, implicit $eax
%1(p0) = G_FRAME_INDEX %fixed-stack.0
%0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
%2(p0) = G_LOAD %0(p0) :: (load 4 from %ir.ptr1)
- %eax = COPY %2(p0)
- RET 0, implicit %eax
+ $eax = COPY %2(p0)
+ RET 0, implicit $eax
...
---
@@ -249,9 +249,9 @@
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_ptr
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.1, align 0)
- ; ALL: MOV32mr [[MOV32rm]], 1, %noreg, 0, %noreg, [[MOV32rm1]] :: (store 4 into %ir.ptr1)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: MOV32mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV32rm1]] :: (store 4 into %ir.ptr1)
; ALL: RET 0
%2(p0) = G_FRAME_INDEX %fixed-stack.1
%0(p0) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
index c1600bd..d0d37f7 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
@@ -109,32 +109,32 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_i8
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, %noreg, 0, %noreg :: (load 1 from %ir.p1)
- ; SSE: %al = COPY [[MOV8rm]]
- ; SSE: RET 0, implicit %al
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
+ ; SSE: $al = COPY [[MOV8rm]]
+ ; SSE: RET 0, implicit $al
; AVX-LABEL: name: test_load_i8
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, %noreg, 0, %noreg :: (load 1 from %ir.p1)
- ; AVX: %al = COPY [[MOV8rm]]
- ; AVX: RET 0, implicit %al
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
+ ; AVX: $al = COPY [[MOV8rm]]
+ ; AVX: RET 0, implicit $al
; AVX512F-LABEL: name: test_load_i8
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, %noreg, 0, %noreg :: (load 1 from %ir.p1)
- ; AVX512F: %al = COPY [[MOV8rm]]
- ; AVX512F: RET 0, implicit %al
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
+ ; AVX512F: $al = COPY [[MOV8rm]]
+ ; AVX512F: RET 0, implicit $al
; AVX512VL-LABEL: name: test_load_i8
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, %noreg, 0, %noreg :: (load 1 from %ir.p1)
- ; AVX512VL: %al = COPY [[MOV8rm]]
- ; AVX512VL: RET 0, implicit %al
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
+ ; AVX512VL: $al = COPY [[MOV8rm]]
+ ; AVX512VL: RET 0, implicit $al
+ %0(p0) = COPY $rdi
%1(s8) = G_LOAD %0(p0) :: (load 1 from %ir.p1)
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -147,32 +147,32 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_i16
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, %noreg, 0, %noreg :: (load 2 from %ir.p1)
- ; SSE: %ax = COPY [[MOV16rm]]
- ; SSE: RET 0, implicit %ax
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
+ ; SSE: $ax = COPY [[MOV16rm]]
+ ; SSE: RET 0, implicit $ax
; AVX-LABEL: name: test_load_i16
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, %noreg, 0, %noreg :: (load 2 from %ir.p1)
- ; AVX: %ax = COPY [[MOV16rm]]
- ; AVX: RET 0, implicit %ax
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
+ ; AVX: $ax = COPY [[MOV16rm]]
+ ; AVX: RET 0, implicit $ax
; AVX512F-LABEL: name: test_load_i16
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, %noreg, 0, %noreg :: (load 2 from %ir.p1)
- ; AVX512F: %ax = COPY [[MOV16rm]]
- ; AVX512F: RET 0, implicit %ax
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
+ ; AVX512F: $ax = COPY [[MOV16rm]]
+ ; AVX512F: RET 0, implicit $ax
; AVX512VL-LABEL: name: test_load_i16
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, %noreg, 0, %noreg :: (load 2 from %ir.p1)
- ; AVX512VL: %ax = COPY [[MOV16rm]]
- ; AVX512VL: RET 0, implicit %ax
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
+ ; AVX512VL: $ax = COPY [[MOV16rm]]
+ ; AVX512VL: RET 0, implicit $ax
+ %0(p0) = COPY $rdi
%1(s16) = G_LOAD %0(p0) :: (load 2 from %ir.p1)
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -185,32 +185,32 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_i32
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; SSE: %eax = COPY [[MOV32rm]]
- ; SSE: RET 0, implicit %eax
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; SSE: $eax = COPY [[MOV32rm]]
+ ; SSE: RET 0, implicit $eax
; AVX-LABEL: name: test_load_i32
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX: %eax = COPY [[MOV32rm]]
- ; AVX: RET 0, implicit %eax
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX: $eax = COPY [[MOV32rm]]
+ ; AVX: RET 0, implicit $eax
; AVX512F-LABEL: name: test_load_i32
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX512F: %eax = COPY [[MOV32rm]]
- ; AVX512F: RET 0, implicit %eax
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX512F: $eax = COPY [[MOV32rm]]
+ ; AVX512F: RET 0, implicit $eax
; AVX512VL-LABEL: name: test_load_i32
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX512VL: %eax = COPY [[MOV32rm]]
- ; AVX512VL: RET 0, implicit %eax
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX512VL: $eax = COPY [[MOV32rm]]
+ ; AVX512VL: RET 0, implicit $eax
+ %0(p0) = COPY $rdi
%1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -223,32 +223,32 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_i64
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; SSE: %rax = COPY [[MOV64rm]]
- ; SSE: RET 0, implicit %rax
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; SSE: $rax = COPY [[MOV64rm]]
+ ; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_load_i64
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX: %rax = COPY [[MOV64rm]]
- ; AVX: RET 0, implicit %rax
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX: $rax = COPY [[MOV64rm]]
+ ; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_load_i64
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX512F: %rax = COPY [[MOV64rm]]
- ; AVX512F: RET 0, implicit %rax
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX512F: $rax = COPY [[MOV64rm]]
+ ; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_load_i64
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX512VL: %rax = COPY [[MOV64rm]]
- ; AVX512VL: RET 0, implicit %rax
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX512VL: $rax = COPY [[MOV64rm]]
+ ; AVX512VL: RET 0, implicit $rax
+ %0(p0) = COPY $rdi
%1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -261,32 +261,32 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_float
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; SSE: %xmm0 = COPY [[MOV32rm]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; SSE: $xmm0 = COPY [[MOV32rm]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_load_float
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX: %xmm0 = COPY [[MOV32rm]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX: $xmm0 = COPY [[MOV32rm]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_load_float
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX512F: %xmm0 = COPY [[MOV32rm]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX512F: $xmm0 = COPY [[MOV32rm]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_load_float
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX512VL: %xmm0 = COPY [[MOV32rm]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX512VL: $xmm0 = COPY [[MOV32rm]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(p0) = COPY $rdi
%1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
- %xmm0 = COPY %1(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s32)
+ RET 0, implicit $xmm0
...
---
@@ -299,32 +299,32 @@
- { id: 1, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_float_vecreg
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; SSE: %xmm0 = COPY [[MOVSSrm]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; SSE: $xmm0 = COPY [[MOVSSrm]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_load_float_vecreg
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[VMOVSSrm:%[0-9]+]]:fr32 = VMOVSSrm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX: %xmm0 = COPY [[VMOVSSrm]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[VMOVSSrm:%[0-9]+]]:fr32 = VMOVSSrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX: $xmm0 = COPY [[VMOVSSrm]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_load_float_vecreg
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[VMOVSSZrm:%[0-9]+]]:fr32x = VMOVSSZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX512F: %xmm0 = COPY [[VMOVSSZrm]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[VMOVSSZrm:%[0-9]+]]:fr32x = VMOVSSZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX512F: $xmm0 = COPY [[VMOVSSZrm]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_load_float_vecreg
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[VMOVSSZrm:%[0-9]+]]:fr32x = VMOVSSZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX512VL: %xmm0 = COPY [[VMOVSSZrm]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[VMOVSSZrm:%[0-9]+]]:fr32x = VMOVSSZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX512VL: $xmm0 = COPY [[VMOVSSZrm]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(p0) = COPY $rdi
%1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
- %xmm0 = COPY %1(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s32)
+ RET 0, implicit $xmm0
...
---
@@ -337,32 +337,32 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_double
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; SSE: %xmm0 = COPY [[MOV64rm]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; SSE: $xmm0 = COPY [[MOV64rm]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_load_double
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX: %xmm0 = COPY [[MOV64rm]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX: $xmm0 = COPY [[MOV64rm]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_load_double
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX512F: %xmm0 = COPY [[MOV64rm]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX512F: $xmm0 = COPY [[MOV64rm]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_load_double
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX512VL: %xmm0 = COPY [[MOV64rm]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX512VL: $xmm0 = COPY [[MOV64rm]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(p0) = COPY $rdi
%1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
- %xmm0 = COPY %1(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s64)
+ RET 0, implicit $xmm0
...
---
@@ -375,32 +375,32 @@
- { id: 1, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_double_vecreg
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; SSE: %xmm0 = COPY [[MOVSDrm]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; SSE: $xmm0 = COPY [[MOVSDrm]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_load_double_vecreg
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[VMOVSDrm:%[0-9]+]]:fr64 = VMOVSDrm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX: %xmm0 = COPY [[VMOVSDrm]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[VMOVSDrm:%[0-9]+]]:fr64 = VMOVSDrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX: $xmm0 = COPY [[VMOVSDrm]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_load_double_vecreg
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[VMOVSDZrm:%[0-9]+]]:fr64x = VMOVSDZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX512F: %xmm0 = COPY [[VMOVSDZrm]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[VMOVSDZrm:%[0-9]+]]:fr64x = VMOVSDZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX512F: $xmm0 = COPY [[VMOVSDZrm]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_load_double_vecreg
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[VMOVSDZrm:%[0-9]+]]:fr64x = VMOVSDZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX512VL: %xmm0 = COPY [[VMOVSDZrm]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[VMOVSDZrm:%[0-9]+]]:fr64x = VMOVSDZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX512VL: $xmm0 = COPY [[VMOVSDZrm]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(p0) = COPY $rdi
%1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
- %xmm0 = COPY %1(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s64)
+ RET 0, implicit $xmm0
...
---
@@ -413,37 +413,37 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %rsi
+ liveins: $edi, $rsi
; SSE-LABEL: name: test_store_i32
- ; SSE: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; SSE: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; SSE: %rax = COPY [[COPY1]]
- ; SSE: RET 0, implicit %rax
+ ; SSE: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; SSE: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
+ ; SSE: $rax = COPY [[COPY1]]
+ ; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_i32
- ; AVX: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; AVX: %rax = COPY [[COPY1]]
- ; AVX: RET 0, implicit %rax
+ ; AVX: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
+ ; AVX: $rax = COPY [[COPY1]]
+ ; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_i32
- ; AVX512F: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX512F: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; AVX512F: %rax = COPY [[COPY1]]
- ; AVX512F: RET 0, implicit %rax
+ ; AVX512F: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX512F: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
+ ; AVX512F: $rax = COPY [[COPY1]]
+ ; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_i32
- ; AVX512VL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX512VL: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; AVX512VL: %rax = COPY [[COPY1]]
- ; AVX512VL: RET 0, implicit %rax
- %0(s32) = COPY %edi
- %1(p0) = COPY %rsi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX512VL: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
+ ; AVX512VL: $rax = COPY [[COPY1]]
+ ; AVX512VL: RET 0, implicit $rax
+ %0(s32) = COPY $edi
+ %1(p0) = COPY $rsi
G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -456,37 +456,37 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; SSE-LABEL: name: test_store_i64
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; SSE: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; SSE: %rax = COPY [[COPY1]]
- ; SSE: RET 0, implicit %rax
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; SSE: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
+ ; SSE: $rax = COPY [[COPY1]]
+ ; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_i64
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; AVX: %rax = COPY [[COPY1]]
- ; AVX: RET 0, implicit %rax
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
+ ; AVX: $rax = COPY [[COPY1]]
+ ; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_i64
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX512F: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; AVX512F: %rax = COPY [[COPY1]]
- ; AVX512F: RET 0, implicit %rax
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX512F: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
+ ; AVX512F: $rax = COPY [[COPY1]]
+ ; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_i64
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX512VL: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; AVX512VL: %rax = COPY [[COPY1]]
- ; AVX512VL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
- %1(p0) = COPY %rsi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX512VL: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
+ ; AVX512VL: $rax = COPY [[COPY1]]
+ ; AVX512VL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
+ %1(p0) = COPY $rsi
G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -500,42 +500,42 @@
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
+ liveins: $rdi, $xmm0
; SSE-LABEL: name: test_store_float
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
+ ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]]
- ; SSE: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 4 into %ir.p1)
- ; SSE: %rax = COPY [[COPY1]]
- ; SSE: RET 0, implicit %rax
+ ; SSE: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1)
+ ; SSE: $rax = COPY [[COPY1]]
+ ; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_float
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
+ ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]]
- ; AVX: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 4 into %ir.p1)
- ; AVX: %rax = COPY [[COPY1]]
- ; AVX: RET 0, implicit %rax
+ ; AVX: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1)
+ ; AVX: $rax = COPY [[COPY1]]
+ ; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_float
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
+ ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]]
- ; AVX512F: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 4 into %ir.p1)
- ; AVX512F: %rax = COPY [[COPY1]]
- ; AVX512F: RET 0, implicit %rax
+ ; AVX512F: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1)
+ ; AVX512F: $rax = COPY [[COPY1]]
+ ; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_float
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]]
- ; AVX512VL: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 4 into %ir.p1)
- ; AVX512VL: %rax = COPY [[COPY1]]
- ; AVX512VL: RET 0, implicit %rax
- %0(s32) = COPY %xmm0
- %1(p0) = COPY %rdi
+ ; AVX512VL: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1)
+ ; AVX512VL: $rax = COPY [[COPY1]]
+ ; AVX512VL: RET 0, implicit $rax
+ %0(s32) = COPY $xmm0
+ %1(p0) = COPY $rdi
%2(s32) = COPY %0(s32)
G_STORE %2(s32), %1(p0) :: (store 4 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -548,37 +548,37 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
+ liveins: $rdi, $xmm0
; SSE-LABEL: name: test_store_float_vec
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: MOVSSmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; SSE: %rax = COPY [[COPY1]]
- ; SSE: RET 0, implicit %rax
+ ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: MOVSSmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
+ ; SSE: $rax = COPY [[COPY1]]
+ ; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_float_vec
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: VMOVSSmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; AVX: %rax = COPY [[COPY1]]
- ; AVX: RET 0, implicit %rax
+ ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: VMOVSSmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
+ ; AVX: $rax = COPY [[COPY1]]
+ ; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_float_vec
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: VMOVSSZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; AVX512F: %rax = COPY [[COPY1]]
- ; AVX512F: RET 0, implicit %rax
+ ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: VMOVSSZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
+ ; AVX512F: $rax = COPY [[COPY1]]
+ ; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_float_vec
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: VMOVSSZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; AVX512VL: %rax = COPY [[COPY1]]
- ; AVX512VL: RET 0, implicit %rax
- %0(s32) = COPY %xmm0
- %1(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: VMOVSSZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
+ ; AVX512VL: $rax = COPY [[COPY1]]
+ ; AVX512VL: RET 0, implicit $rax
+ %0(s32) = COPY $xmm0
+ %1(p0) = COPY $rdi
G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -590,45 +590,45 @@
- { id: 0, class: vecr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# NO_AVX512X: %0:fr64 = COPY %xmm0
+# NO_AVX512X: %0:fr64 = COPY $xmm0
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
+ liveins: $rdi, $xmm0
; SSE-LABEL: name: test_store_double
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
+ ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]]
- ; SSE: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 8 into %ir.p1)
- ; SSE: %rax = COPY [[COPY1]]
- ; SSE: RET 0, implicit %rax
+ ; SSE: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1)
+ ; SSE: $rax = COPY [[COPY1]]
+ ; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_double
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
+ ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]]
- ; AVX: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 8 into %ir.p1)
- ; AVX: %rax = COPY [[COPY1]]
- ; AVX: RET 0, implicit %rax
+ ; AVX: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1)
+ ; AVX: $rax = COPY [[COPY1]]
+ ; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_double
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
+ ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]]
- ; AVX512F: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 8 into %ir.p1)
- ; AVX512F: %rax = COPY [[COPY1]]
- ; AVX512F: RET 0, implicit %rax
+ ; AVX512F: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1)
+ ; AVX512F: $rax = COPY [[COPY1]]
+ ; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_double
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]]
- ; AVX512VL: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 8 into %ir.p1)
- ; AVX512VL: %rax = COPY [[COPY1]]
- ; AVX512VL: RET 0, implicit %rax
- %0(s64) = COPY %xmm0
- %1(p0) = COPY %rdi
+ ; AVX512VL: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1)
+ ; AVX512VL: $rax = COPY [[COPY1]]
+ ; AVX512VL: RET 0, implicit $rax
+ %0(s64) = COPY $xmm0
+ %1(p0) = COPY $rdi
%2(s64) = COPY %0(s64)
G_STORE %2(s64), %1(p0) :: (store 8 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -641,37 +641,37 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
+ liveins: $rdi, $xmm0
; SSE-LABEL: name: test_store_double_vec
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: MOVSDmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; SSE: %rax = COPY [[COPY1]]
- ; SSE: RET 0, implicit %rax
+ ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: MOVSDmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
+ ; SSE: $rax = COPY [[COPY1]]
+ ; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_double_vec
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: VMOVSDmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; AVX: %rax = COPY [[COPY1]]
- ; AVX: RET 0, implicit %rax
+ ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: VMOVSDmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
+ ; AVX: $rax = COPY [[COPY1]]
+ ; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_double_vec
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: VMOVSDZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; AVX512F: %rax = COPY [[COPY1]]
- ; AVX512F: RET 0, implicit %rax
+ ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: VMOVSDZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
+ ; AVX512F: $rax = COPY [[COPY1]]
+ ; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_double_vec
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: VMOVSDZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; AVX512VL: %rax = COPY [[COPY1]]
- ; AVX512VL: RET 0, implicit %rax
- %0(s64) = COPY %xmm0
- %1(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: VMOVSDZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
+ ; AVX512VL: $rax = COPY [[COPY1]]
+ ; AVX512VL: RET 0, implicit $rax
+ %0(s64) = COPY $xmm0
+ %1(p0) = COPY $rdi
G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -685,32 +685,32 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_ptr
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.ptr1)
- ; SSE: %rax = COPY [[MOV64rm]]
- ; SSE: RET 0, implicit %rax
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1)
+ ; SSE: $rax = COPY [[MOV64rm]]
+ ; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_load_ptr
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.ptr1)
- ; AVX: %rax = COPY [[MOV64rm]]
- ; AVX: RET 0, implicit %rax
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1)
+ ; AVX: $rax = COPY [[MOV64rm]]
+ ; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_load_ptr
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.ptr1)
- ; AVX512F: %rax = COPY [[MOV64rm]]
- ; AVX512F: RET 0, implicit %rax
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1)
+ ; AVX512F: $rax = COPY [[MOV64rm]]
+ ; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_load_ptr
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.ptr1)
- ; AVX512VL: %rax = COPY [[MOV64rm]]
- ; AVX512VL: RET 0, implicit %rax
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1)
+ ; AVX512VL: $rax = COPY [[MOV64rm]]
+ ; AVX512VL: RET 0, implicit $rax
+ %0(p0) = COPY $rdi
%1(p0) = G_LOAD %0(p0) :: (load 8 from %ir.ptr1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -724,30 +724,30 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; SSE-LABEL: name: test_store_ptr
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; SSE: MOV64mr [[COPY]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; SSE: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
; SSE: RET 0
; AVX-LABEL: name: test_store_ptr
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX: MOV64mr [[COPY]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
; AVX: RET 0
; AVX512F-LABEL: name: test_store_ptr
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX512F: MOV64mr [[COPY]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX512F: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
; AVX512F: RET 0
; AVX512VL-LABEL: name: test_store_ptr
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX512VL: MOV64mr [[COPY]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX512VL: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
; AVX512VL: RET 0
- %0(p0) = COPY %rdi
- %1(p0) = COPY %rsi
+ %0(p0) = COPY $rdi
+ %1(p0) = COPY $rsi
G_STORE %1(p0), %0(p0) :: (store 8 into %ir.ptr1)
RET 0
@@ -765,44 +765,44 @@
- { id: 4, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %esi, %rdi
+ liveins: $esi, $rdi
; SSE-LABEL: name: test_gep_folding
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; SSE: MOV32mr [[COPY]], 1, %noreg, 20, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
- ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 20, %noreg :: (load 4 from %ir.arrayidx)
- ; SSE: %eax = COPY [[MOV32rm]]
- ; SSE: RET 0, implicit %eax
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; SSE: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+ ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
+ ; SSE: $eax = COPY [[MOV32rm]]
+ ; SSE: RET 0, implicit $eax
; AVX-LABEL: name: test_gep_folding
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; AVX: MOV32mr [[COPY]], 1, %noreg, 20, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
- ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 20, %noreg :: (load 4 from %ir.arrayidx)
- ; AVX: %eax = COPY [[MOV32rm]]
- ; AVX: RET 0, implicit %eax
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; AVX: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+ ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
+ ; AVX: $eax = COPY [[MOV32rm]]
+ ; AVX: RET 0, implicit $eax
; AVX512F-LABEL: name: test_gep_folding
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; AVX512F: MOV32mr [[COPY]], 1, %noreg, 20, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
- ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 20, %noreg :: (load 4 from %ir.arrayidx)
- ; AVX512F: %eax = COPY [[MOV32rm]]
- ; AVX512F: RET 0, implicit %eax
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; AVX512F: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+ ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
+ ; AVX512F: $eax = COPY [[MOV32rm]]
+ ; AVX512F: RET 0, implicit $eax
; AVX512VL-LABEL: name: test_gep_folding
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; AVX512VL: MOV32mr [[COPY]], 1, %noreg, 20, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
- ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 20, %noreg :: (load 4 from %ir.arrayidx)
- ; AVX512VL: %eax = COPY [[MOV32rm]]
- ; AVX512VL: RET 0, implicit %eax
- %0(p0) = COPY %rdi
- %1(s32) = COPY %esi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; AVX512VL: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+ ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
+ ; AVX512VL: $eax = COPY [[MOV32rm]]
+ ; AVX512VL: RET 0, implicit $eax
+ %0(p0) = COPY $rdi
+ %1(s32) = COPY $esi
%2(s64) = G_CONSTANT i64 20
%3(p0) = G_GEP %0, %2(s64)
G_STORE %1(s32), %3(p0) :: (store 4 into %ir.arrayidx)
%4(s32) = G_LOAD %3(p0) :: (load 4 from %ir.arrayidx)
- %eax = COPY %4(s32)
- RET 0, implicit %eax
+ $eax = COPY %4(s32)
+ RET 0, implicit $eax
...
---
@@ -818,51 +818,51 @@
- { id: 4, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %esi, %rdi
+ liveins: $esi, $rdi
; SSE-LABEL: name: test_gep_folding_largeGepIndex
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; SSE: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
- ; SSE: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, %noreg
- ; SSE: MOV32mr [[LEA64r]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
- ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, %noreg, 0, %noreg :: (load 4 from %ir.arrayidx)
- ; SSE: %eax = COPY [[MOV32rm]]
- ; SSE: RET 0, implicit %eax
+ ; SSE: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
+ ; SSE: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+ ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load 4 from %ir.arrayidx)
+ ; SSE: $eax = COPY [[MOV32rm]]
+ ; SSE: RET 0, implicit $eax
; AVX-LABEL: name: test_gep_folding_largeGepIndex
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; AVX: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
- ; AVX: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, %noreg
- ; AVX: MOV32mr [[LEA64r]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
- ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, %noreg, 0, %noreg :: (load 4 from %ir.arrayidx)
- ; AVX: %eax = COPY [[MOV32rm]]
- ; AVX: RET 0, implicit %eax
+ ; AVX: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
+ ; AVX: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+ ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load 4 from %ir.arrayidx)
+ ; AVX: $eax = COPY [[MOV32rm]]
+ ; AVX: RET 0, implicit $eax
; AVX512F-LABEL: name: test_gep_folding_largeGepIndex
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; AVX512F: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
- ; AVX512F: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, %noreg
- ; AVX512F: MOV32mr [[LEA64r]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
- ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, %noreg, 0, %noreg :: (load 4 from %ir.arrayidx)
- ; AVX512F: %eax = COPY [[MOV32rm]]
- ; AVX512F: RET 0, implicit %eax
+ ; AVX512F: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
+ ; AVX512F: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+ ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load 4 from %ir.arrayidx)
+ ; AVX512F: $eax = COPY [[MOV32rm]]
+ ; AVX512F: RET 0, implicit $eax
; AVX512VL-LABEL: name: test_gep_folding_largeGepIndex
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; AVX512VL: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
- ; AVX512VL: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, %noreg
- ; AVX512VL: MOV32mr [[LEA64r]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
- ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, %noreg, 0, %noreg :: (load 4 from %ir.arrayidx)
- ; AVX512VL: %eax = COPY [[MOV32rm]]
- ; AVX512VL: RET 0, implicit %eax
- %0(p0) = COPY %rdi
- %1(s32) = COPY %esi
+ ; AVX512VL: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
+ ; AVX512VL: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+ ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load 4 from %ir.arrayidx)
+ ; AVX512VL: $eax = COPY [[MOV32rm]]
+ ; AVX512VL: RET 0, implicit $eax
+ %0(p0) = COPY $rdi
+ %1(s32) = COPY $esi
%2(s64) = G_CONSTANT i64 228719476720
%3(p0) = G_GEP %0, %2(s64)
G_STORE %1(s32), %3(p0) :: (store 4 into %ir.arrayidx)
%4(s32) = G_LOAD %3(p0) :: (load 4 from %ir.arrayidx)
- %eax = COPY %4(s32)
- RET 0, implicit %eax
+ $eax = COPY %4(s32)
+ RET 0, implicit $eax
...
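The rewrite in these hunks is mechanical: physical and special registers ('%rdi', '%eflags', '%noreg') gain the '$' sigil, while virtual registers ('%0', '%2.sub_xmm'), block references ('%bb.1'), and IR names ('%ir.p1') keep '%'. FileCheck capture patterns such as [[COPY:%[0-9]+]] are also untouched, since the '%' there is followed by '[' rather than a register name. Below is a minimal sketch of that substitution; the PHYS_REGS set and the resigil helper are hypothetical names for illustration only — an actual migration would derive the register names from the target's register definitions rather than a hand-written list.

  import re

  # Hypothetical register set, for illustration; a real conversion would pull
  # the physical-register names from the target's TableGen register info.
  PHYS_REGS = {
      "rax", "rdi", "rsi", "rdx", "eax", "edi", "esi", "edx", "ax", "di",
      "si", "al", "dil", "sil", "eflags", "noreg",
      "xmm0", "xmm1", "ymm0", "ymm1", "zmm0", "zmm1",
  }

  def resigil(line):
      # Swap '%name' for '$name' only when 'name' is a known physical register.
      # '%0', '%bb.1', '%ir.p1', and named vregs never match the set, so the
      # virtual-register namespace keeps its '%' sigil untouched.
      def repl(m):
          name = m.group(1)
          return "$" + name if name in PHYS_REGS else m.group(0)
      return re.sub(r"%([a-zA-Z_][a-zA-Z0-9_]*)", repl, line)

  assert resigil("    liveins: %rdi, %xmm0") == "    liveins: $rdi, $xmm0"
  assert resigil("    %0(p0) = COPY %rdi") == "    %0(p0) = COPY $rdi"
  assert resigil("    RET 0, implicit %rax") == "    RET 0, implicit $rax"

The asserts mirror the before/after pairs visible in the hunks above: operands and liveins lists change sigil, while the vreg definitions on the left-hand side do not.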
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-memop-v128.mir b/llvm/test/CodeGen/X86/GlobalISel/select-memop-v128.mir
index 7a3647c..f6c1e86 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-memop-v128.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-memop-v128.mir
@@ -34,20 +34,20 @@
registers:
- { id: 0, class: gpr }
- { id: 1, class: vecr }
-# ALL: %0:gr64 = COPY %rdi
-# SSE: %1:vr128 = MOVUPSrm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1, align 1)
-# AVX: %1:vr128 = VMOVUPSrm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1, align 1)
-# AVX512F: %1:vr128x = VMOVUPSZ128rm_NOVLX %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1, align 1)
-# AVX512VL: %1:vr128x = VMOVUPSZ128rm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1, align 1)
-# ALL: %xmm0 = COPY %1
+# ALL: %0:gr64 = COPY $rdi
+# SSE: %1:vr128 = MOVUPSrm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1, align 1)
+# AVX: %1:vr128 = VMOVUPSrm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1, align 1)
+# AVX512F: %1:vr128x = VMOVUPSZ128rm_NOVLX %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1, align 1)
+# AVX512VL: %1:vr128x = VMOVUPSZ128rm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1, align 1)
+# ALL: $xmm0 = COPY %1
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
- %0(p0) = COPY %rdi
+ %0(p0) = COPY $rdi
%1(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.p1, align 1)
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -59,20 +59,20 @@
registers:
- { id: 0, class: gpr }
- { id: 1, class: vecr }
-# ALL: %0:gr64 = COPY %rdi
-# SSE: %1:vr128 = MOVAPSrm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1)
-# AVX: %1:vr128 = VMOVAPSrm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1)
-# AVX512F: %1:vr128x = VMOVAPSZ128rm_NOVLX %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1)
-# AVX512VL: %1:vr128x = VMOVAPSZ128rm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1)
-# ALL: %xmm0 = COPY %1
+# ALL: %0:gr64 = COPY $rdi
+# SSE: %1:vr128 = MOVAPSrm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1)
+# AVX: %1:vr128 = VMOVAPSrm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1)
+# AVX512F: %1:vr128x = VMOVAPSZ128rm_NOVLX %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1)
+# AVX512VL: %1:vr128x = VMOVAPSZ128rm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1)
+# ALL: $xmm0 = COPY %1
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
- %0(p0) = COPY %rdi
+ %0(p0) = COPY $rdi
%1(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.p1)
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -84,23 +84,23 @@
registers:
- { id: 0, class: vecr }
- { id: 1, class: gpr }
-# NO_AVX512F: %0:vr128 = COPY %xmm0
-# AVX512ALL: %0:vr128x = COPY %xmm0
-# ALL: %1:gr64 = COPY %rdi
-# SSE: MOVAPSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1)
-# AVX: VMOVAPSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1)
-# AVX512F: VMOVAPSZ128mr_NOVLX %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1)
-# AVX512VL: VMOVAPSZ128mr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1)
-# ALL: %rax = COPY %1
+# NO_AVX512F: %0:vr128 = COPY $xmm0
+# AVX512ALL: %0:vr128x = COPY $xmm0
+# ALL: %1:gr64 = COPY $rdi
+# SSE: MOVAPSmr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1)
+# AVX: VMOVAPSmr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1)
+# AVX512F: VMOVAPSZ128mr_NOVLX %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1)
+# AVX512VL: VMOVAPSZ128mr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1)
+# ALL: $rax = COPY %1
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
+ liveins: $rdi, $xmm0
- %0(<4 x s32>) = COPY %xmm0
- %1(p0) = COPY %rdi
+ %0(<4 x s32>) = COPY $xmm0
+ %1(p0) = COPY $rdi
G_STORE %0(<4 x s32>), %1(p0) :: (store 16 into %ir.p1, align 16)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -112,22 +112,22 @@
registers:
- { id: 0, class: vecr }
- { id: 1, class: gpr }
-# NO_AVX512F: %0:vr128 = COPY %xmm0
-# AVX512ALL: %0:vr128x = COPY %xmm0
-# ALL: %1:gr64 = COPY %rdi
-# SSE: MOVUPSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1, align 1)
-# AVX: VMOVUPSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1, align 1)
-# AVX512F: VMOVUPSZ128mr_NOVLX %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1, align 1)
-# AVX512VL: VMOVUPSZ128mr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1, align 1)
-# ALL: %rax = COPY %1
+# NO_AVX512F: %0:vr128 = COPY $xmm0
+# AVX512ALL: %0:vr128x = COPY $xmm0
+# ALL: %1:gr64 = COPY $rdi
+# SSE: MOVUPSmr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1, align 1)
+# AVX: VMOVUPSmr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1, align 1)
+# AVX512F: VMOVUPSZ128mr_NOVLX %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1, align 1)
+# AVX512VL: VMOVUPSZ128mr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1, align 1)
+# ALL: $rax = COPY %1
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
+ liveins: $rdi, $xmm0
- %0(<4 x s32>) = COPY %xmm0
- %1(p0) = COPY %rdi
+ %0(<4 x s32>) = COPY $xmm0
+ %1(p0) = COPY $rdi
G_STORE %0(<4 x s32>), %1(p0) :: (store 16 into %ir.p1, align 1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-memop-v256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-memop-v256.mir
index 962201f..d4b56a0 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-memop-v256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-memop-v256.mir
@@ -42,28 +42,28 @@
registers:
- { id: 0, class: gpr }
- { id: 1, class: vecr }
-# NO_AVX512F: %0:gr64 = COPY %rdi
-# NO_AVX512F-NEXT: %1:vr256 = VMOVUPSYrm %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1, align 1)
-# NO_AVX512F-NEXT: %ymm0 = COPY %1
-# NO_AVX512F-NEXT: RET 0, implicit %ymm0
+# NO_AVX512F: %0:gr64 = COPY $rdi
+# NO_AVX512F-NEXT: %1:vr256 = VMOVUPSYrm %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1, align 1)
+# NO_AVX512F-NEXT: $ymm0 = COPY %1
+# NO_AVX512F-NEXT: RET 0, implicit $ymm0
#
-# AVX512F: %0:gr64 = COPY %rdi
-# AVX512F-NEXT: %1:vr256x = VMOVUPSZ256rm_NOVLX %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1, align 1)
-# AVX512F-NEXT: %ymm0 = COPY %1
-# AVX512F-NEXT: RET 0, implicit %ymm0
+# AVX512F: %0:gr64 = COPY $rdi
+# AVX512F-NEXT: %1:vr256x = VMOVUPSZ256rm_NOVLX %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1, align 1)
+# AVX512F-NEXT: $ymm0 = COPY %1
+# AVX512F-NEXT: RET 0, implicit $ymm0
#
-# AVX512VL: %0:gr64 = COPY %rdi
-# AVX512VL-NEXT: %1:vr256x = VMOVUPSZ256rm %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1, align 1)
-# AVX512VL-NEXT: %ymm0 = COPY %1
-# AVX512VL-NEXT: RET 0, implicit %ymm0
+# AVX512VL: %0:gr64 = COPY $rdi
+# AVX512VL-NEXT: %1:vr256x = VMOVUPSZ256rm %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1, align 1)
+# AVX512VL-NEXT: $ymm0 = COPY %1
+# AVX512VL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
- %0(p0) = COPY %rdi
+ %0(p0) = COPY $rdi
%1(<8 x s32>) = G_LOAD %0(p0) :: (load 32 from %ir.p1, align 1)
- %ymm0 = COPY %1(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -75,28 +75,28 @@
registers:
- { id: 0, class: gpr }
- { id: 1, class: vecr }
-# NO_AVX512F: %0:gr64 = COPY %rdi
-# NO_AVX512F-NEXT: %1:vr256 = VMOVAPSYrm %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1)
-# NO_AVX512F-NEXT: %ymm0 = COPY %1
-# NO_AVX512F-NEXT: RET 0, implicit %ymm0
+# NO_AVX512F: %0:gr64 = COPY $rdi
+# NO_AVX512F-NEXT: %1:vr256 = VMOVAPSYrm %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1)
+# NO_AVX512F-NEXT: $ymm0 = COPY %1
+# NO_AVX512F-NEXT: RET 0, implicit $ymm0
#
-# AVX512F: %0:gr64 = COPY %rdi
-# AVX512F-NEXT: %1:vr256x = VMOVAPSZ256rm_NOVLX %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1)
-# AVX512F-NEXT: %ymm0 = COPY %1
-# AVX512F-NEXT: RET 0, implicit %ymm0
+# AVX512F: %0:gr64 = COPY $rdi
+# AVX512F-NEXT: %1:vr256x = VMOVAPSZ256rm_NOVLX %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1)
+# AVX512F-NEXT: $ymm0 = COPY %1
+# AVX512F-NEXT: RET 0, implicit $ymm0
#
-# AVX512VL: %0:gr64 = COPY %rdi
-# AVX512VL-NEXT: %1:vr256x = VMOVAPSZ256rm %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1)
-# AVX512VL-NEXT: %ymm0 = COPY %1
-# AVX512VL-NEXT: RET 0, implicit %ymm0
+# AVX512VL: %0:gr64 = COPY $rdi
+# AVX512VL-NEXT: %1:vr256x = VMOVAPSZ256rm %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1)
+# AVX512VL-NEXT: $ymm0 = COPY %1
+# AVX512VL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
- %0(p0) = COPY %rdi
+ %0(p0) = COPY $rdi
%1(<8 x s32>) = G_LOAD %0(p0) :: (load 32 from %ir.p1)
- %ymm0 = COPY %1(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -115,26 +115,26 @@
registers:
- { id: 0, class: vecr }
- { id: 1, class: gpr }
-# NO_AVX512F: %0:vr256 = COPY %ymm0
-# NO_AVX512F-NEXT: %1:gr64 = COPY %rdi
-# NO_AVX512F-NEXT: VMOVUPSYmr %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1, align 1)
+# NO_AVX512F: %0:vr256 = COPY $ymm0
+# NO_AVX512F-NEXT: %1:gr64 = COPY $rdi
+# NO_AVX512F-NEXT: VMOVUPSYmr %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1, align 1)
# NO_AVX512F-NEXT: RET 0
#
-# AVX512F: %0:vr256x = COPY %ymm0
-# AVX512F-NEXT: %1:gr64 = COPY %rdi
-# AVX512F-NEXT: VMOVUPSZ256mr_NOVLX %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1, align 1)
+# AVX512F: %0:vr256x = COPY $ymm0
+# AVX512F-NEXT: %1:gr64 = COPY $rdi
+# AVX512F-NEXT: VMOVUPSZ256mr_NOVLX %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1, align 1)
# AVX512F-NEXT: RET 0
#
-# AVX512VL: %0:vr256x = COPY %ymm0
-# AVX512VL-NEXT: %1:gr64 = COPY %rdi
-# AVX512VL-NEXT: VMOVUPSZ256mr %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1, align 1)
+# AVX512VL: %0:vr256x = COPY $ymm0
+# AVX512VL-NEXT: %1:gr64 = COPY $rdi
+# AVX512VL-NEXT: VMOVUPSZ256mr %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1, align 1)
# AVX512VL-NEXT: RET 0
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %ymm0
+ liveins: $rdi, $ymm0
- %0(<8 x s32>) = COPY %ymm0
- %1(p0) = COPY %rdi
+ %0(<8 x s32>) = COPY $ymm0
+ %1(p0) = COPY $rdi
G_STORE %0(<8 x s32>), %1(p0) :: (store 32 into %ir.p1, align 1)
RET 0
@@ -155,26 +155,26 @@
registers:
- { id: 0, class: vecr }
- { id: 1, class: gpr }
-# NO_AVX512F: %0:vr256 = COPY %ymm0
-# NO_AVX512F-NEXT: %1:gr64 = COPY %rdi
-# NO_AVX512F-NEXT: VMOVAPSYmr %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1)
+# NO_AVX512F: %0:vr256 = COPY $ymm0
+# NO_AVX512F-NEXT: %1:gr64 = COPY $rdi
+# NO_AVX512F-NEXT: VMOVAPSYmr %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1)
# NO_AVX512F-NEXT: RET 0
#
-# AVX512F: %0:vr256x = COPY %ymm0
-# AVX512F-NEXT: %1:gr64 = COPY %rdi
-# AVX512F-NEXT: VMOVAPSZ256mr_NOVLX %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1)
+# AVX512F: %0:vr256x = COPY $ymm0
+# AVX512F-NEXT: %1:gr64 = COPY $rdi
+# AVX512F-NEXT: VMOVAPSZ256mr_NOVLX %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1)
# AVX512F-NEXT: RET 0
#
-# AVX512VL: %0:vr256x = COPY %ymm0
-# AVX512VL-NEXT: %1:gr64 = COPY %rdi
-# AVX512VL-NEXT: VMOVAPSZ256mr %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1)
+# AVX512VL: %0:vr256x = COPY $ymm0
+# AVX512VL-NEXT: %1:gr64 = COPY $rdi
+# AVX512VL-NEXT: VMOVAPSZ256mr %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1)
# AVX512VL-NEXT: RET 0
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %ymm0
+ liveins: $rdi, $ymm0
- %0(<8 x s32>) = COPY %ymm0
- %1(p0) = COPY %rdi
+ %0(<8 x s32>) = COPY $ymm0
+ %1(p0) = COPY $rdi
G_STORE %0(<8 x s32>), %1(p0) :: (store 32 into %ir.p1)
RET 0
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-memop-v512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-memop-v512.mir
index 8be5c94..8512247 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-memop-v512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-memop-v512.mir
@@ -32,17 +32,17 @@
- { id: 1, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; AVX512F-LABEL: name: test_load_v16i32_noalign
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[VMOVUPSZrm:%[0-9]+]]:vr512 = VMOVUPSZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 64 from %ir.p1, align 1)
- ; AVX512F: %zmm0 = COPY [[VMOVUPSZrm]]
- ; AVX512F: RET 0, implicit %zmm0
- %0(p0) = COPY %rdi
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[VMOVUPSZrm:%[0-9]+]]:vr512 = VMOVUPSZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 64 from %ir.p1, align 1)
+ ; AVX512F: $zmm0 = COPY [[VMOVUPSZrm]]
+ ; AVX512F: RET 0, implicit $zmm0
+ %0(p0) = COPY $rdi
%1(<16 x s32>) = G_LOAD %0(p0) :: (load 64 from %ir.p1, align 1)
- %zmm0 = COPY %1(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %1(<16 x s32>)
+ RET 0, implicit $zmm0
...
---
@@ -55,17 +55,17 @@
- { id: 1, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; AVX512F-LABEL: name: test_load_v16i32_align
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[VMOVUPSZrm:%[0-9]+]]:vr512 = VMOVUPSZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 64 from %ir.p1, align 32)
- ; AVX512F: %zmm0 = COPY [[VMOVUPSZrm]]
- ; AVX512F: RET 0, implicit %zmm0
- %0(p0) = COPY %rdi
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[VMOVUPSZrm:%[0-9]+]]:vr512 = VMOVUPSZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 64 from %ir.p1, align 32)
+ ; AVX512F: $zmm0 = COPY [[VMOVUPSZrm]]
+ ; AVX512F: RET 0, implicit $zmm0
+ %0(p0) = COPY $rdi
%1(<16 x s32>) = G_LOAD %0(p0) :: (load 64 from %ir.p1, align 32)
- %zmm0 = COPY %1(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %1(<16 x s32>)
+ RET 0, implicit $zmm0
...
---
@@ -78,15 +78,15 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %zmm0
+ liveins: $rdi, $zmm0
; AVX512F-LABEL: name: test_store_v16i32_noalign
- ; AVX512F: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: VMOVUPSZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 64 into %ir.p1, align 1)
+ ; AVX512F: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: VMOVUPSZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 64 into %ir.p1, align 1)
; AVX512F: RET 0
- %0(<16 x s32>) = COPY %zmm0
- %1(p0) = COPY %rdi
+ %0(<16 x s32>) = COPY $zmm0
+ %1(p0) = COPY $rdi
G_STORE %0(<16 x s32>), %1(p0) :: (store 64 into %ir.p1, align 1)
RET 0
@@ -101,15 +101,15 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %zmm0
+ liveins: $rdi, $zmm0
; AVX512F-LABEL: name: test_store_v16i32_align
- ; AVX512F: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: VMOVUPSZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 64 into %ir.p1, align 32)
+ ; AVX512F: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: VMOVUPSZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 64 into %ir.p1, align 32)
; AVX512F: RET 0
- %0(<16 x s32>) = COPY %zmm0
- %1(p0) = COPY %rdi
+ %0(<16 x s32>) = COPY $zmm0
+ %1(p0) = COPY $rdi
G_STORE %0(<16 x s32>), %1(p0) :: (store 64 into %ir.p1, align 32)
RET 0
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
index 14b82ef..6690885 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
@@ -24,18 +24,18 @@
; AVX: [[DEF:%[0-9]+]]:vr128 = IMPLICIT_DEF
; AVX: undef %2.sub_xmm:vr256 = COPY [[DEF]]
; AVX: [[VINSERTF128rr:%[0-9]+]]:vr256 = VINSERTF128rr %2, [[DEF]], 1
- ; AVX: %ymm0 = COPY [[VINSERTF128rr]]
- ; AVX: RET 0, implicit %ymm0
+ ; AVX: $ymm0 = COPY [[VINSERTF128rr]]
+ ; AVX: RET 0, implicit $ymm0
; AVX512VL-LABEL: name: test_merge
; AVX512VL: [[DEF:%[0-9]+]]:vr128x = IMPLICIT_DEF
; AVX512VL: undef %2.sub_xmm:vr256x = COPY [[DEF]]
; AVX512VL: [[VINSERTF32x4Z256rr:%[0-9]+]]:vr256x = VINSERTF32x4Z256rr %2, [[DEF]], 1
- ; AVX512VL: %ymm0 = COPY [[VINSERTF32x4Z256rr]]
- ; AVX512VL: RET 0, implicit %ymm0
+ ; AVX512VL: $ymm0 = COPY [[VINSERTF32x4Z256rr]]
+ ; AVX512VL: RET 0, implicit $ymm0
%0(<4 x s32>) = IMPLICIT_DEF
%1(<8 x s32>) = G_MERGE_VALUES %0(<4 x s32>), %0(<4 x s32>)
- %ymm0 = COPY %1(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
index adf27a2..2bf1d36 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
@@ -27,12 +27,12 @@
; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr %2, [[DEF]], 1
; ALL: [[VINSERTF32x4Zrr1:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[VINSERTF32x4Zrr]], [[DEF]], 2
; ALL: [[VINSERTF32x4Zrr2:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[VINSERTF32x4Zrr1]], [[DEF]], 3
- ; ALL: %zmm0 = COPY [[VINSERTF32x4Zrr2]]
- ; ALL: RET 0, implicit %zmm0
+ ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr2]]
+ ; ALL: RET 0, implicit $zmm0
%0(<4 x s32>) = IMPLICIT_DEF
%1(<16 x s32>) = G_MERGE_VALUES %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>)
- %zmm0 = COPY %1(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %1(<16 x s32>)
+ RET 0, implicit $zmm0
...
---
@@ -50,12 +50,12 @@
; ALL: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF
; ALL: undef %2.sub_ymm:vr512 = COPY [[DEF]]
; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr %2, [[DEF]], 1
- ; ALL: %zmm0 = COPY [[VINSERTF64x4Zrr]]
- ; ALL: RET 0, implicit %zmm0
+ ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]]
+ ; ALL: RET 0, implicit $zmm0
%0(<8 x s32>) = IMPLICIT_DEF
%1(<16 x s32>) = G_MERGE_VALUES %0(<8 x s32>), %0(<8 x s32>)
- %zmm0 = COPY %1(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %1(<16 x s32>)
+ RET 0, implicit $zmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir
index f0766ff..31ae92c 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir
@@ -29,19 +29,19 @@
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_mul_i16
- ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY %di
- ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY %si
- ; ALL: [[IMUL16rr:%[0-9]+]]:gr16 = IMUL16rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %ax = COPY [[IMUL16rr]]
- ; ALL: RET 0, implicit %ax
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY $di
+ ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY $si
+ ; ALL: [[IMUL16rr:%[0-9]+]]:gr16 = IMUL16rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $ax = COPY [[IMUL16rr]]
+ ; ALL: RET 0, implicit $ax
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s16) = G_MUL %0, %1
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -55,19 +55,19 @@
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_mul_i32
- ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; ALL: [[IMUL32rr:%[0-9]+]]:gr32 = IMUL32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %eax = COPY [[IMUL32rr]]
- ; ALL: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; ALL: [[IMUL32rr:%[0-9]+]]:gr32 = IMUL32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $eax = COPY [[IMUL32rr]]
+ ; ALL: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_MUL %0, %1
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -81,18 +81,18 @@
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; ALL-LABEL: name: test_mul_i64
- ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; ALL: [[IMUL64rr:%[0-9]+]]:gr64 = IMUL64rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %rax = COPY [[IMUL64rr]]
- ; ALL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; ALL: [[IMUL64rr:%[0-9]+]]:gr64 = IMUL64rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $rax = COPY [[IMUL64rr]]
+ ; ALL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s64) = G_MUL %0, %1
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-mul-vec.mir b/llvm/test/CodeGen/X86/GlobalISel/select-mul-vec.mir
index afc4081..aa26417 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-mul-vec.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-mul-vec.mir
@@ -100,19 +100,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_mul_v8i16
- ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
; CHECK: [[PMULLWrr:%[0-9]+]]:vr128 = PMULLWrr [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[PMULLWrr]]
- ; CHECK: RET 0, implicit %xmm0
- %0(<8 x s16>) = COPY %xmm0
- %1(<8 x s16>) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[PMULLWrr]]
+ ; CHECK: RET 0, implicit $xmm0
+ %0(<8 x s16>) = COPY $xmm0
+ %1(<8 x s16>) = COPY $xmm1
%2(<8 x s16>) = G_MUL %0, %1
- %xmm0 = COPY %2(<8 x s16>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit $xmm0
...
---
@@ -126,19 +126,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_mul_v8i16_avx
- ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
; CHECK: [[VPMULLWrr:%[0-9]+]]:vr128 = VPMULLWrr [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[VPMULLWrr]]
- ; CHECK: RET 0, implicit %xmm0
- %0(<8 x s16>) = COPY %xmm0
- %1(<8 x s16>) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[VPMULLWrr]]
+ ; CHECK: RET 0, implicit $xmm0
+ %0(<8 x s16>) = COPY $xmm0
+ %1(<8 x s16>) = COPY $xmm1
%2(<8 x s16>) = G_MUL %0, %1
- %xmm0 = COPY %2(<8 x s16>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit $xmm0
...
---
@@ -152,19 +152,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_mul_v8i16_avx512bwvl
- ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
; CHECK: [[VPMULLWZ128rr:%[0-9]+]]:vr128x = VPMULLWZ128rr [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[VPMULLWZ128rr]]
- ; CHECK: RET 0, implicit %xmm0
- %0(<8 x s16>) = COPY %xmm0
- %1(<8 x s16>) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[VPMULLWZ128rr]]
+ ; CHECK: RET 0, implicit $xmm0
+ %0(<8 x s16>) = COPY $xmm0
+ %1(<8 x s16>) = COPY $xmm1
%2(<8 x s16>) = G_MUL %0, %1
- %xmm0 = COPY %2(<8 x s16>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit $xmm0
...
---
@@ -178,19 +178,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_mul_v4i32
- ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
; CHECK: [[PMULLDrr:%[0-9]+]]:vr128 = PMULLDrr [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[PMULLDrr]]
- ; CHECK: RET 0, implicit %xmm0
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[PMULLDrr]]
+ ; CHECK: RET 0, implicit $xmm0
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_MUL %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -204,19 +204,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_mul_v4i32_avx
- ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
; CHECK: [[VPMULLDrr:%[0-9]+]]:vr128 = VPMULLDrr [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[VPMULLDrr]]
- ; CHECK: RET 0, implicit %xmm0
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[VPMULLDrr]]
+ ; CHECK: RET 0, implicit $xmm0
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_MUL %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -230,19 +230,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_mul_v4i32_avx512vl
- ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
; CHECK: [[VPMULLDZ128rr:%[0-9]+]]:vr128x = VPMULLDZ128rr [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[VPMULLDZ128rr]]
- ; CHECK: RET 0, implicit %xmm0
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[VPMULLDZ128rr]]
+ ; CHECK: RET 0, implicit $xmm0
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_MUL %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -256,19 +256,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_mul_v2i64
- ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
; CHECK: [[VPMULLQZ128rr:%[0-9]+]]:vr128x = VPMULLQZ128rr [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[VPMULLQZ128rr]]
- ; CHECK: RET 0, implicit %xmm0
- %0(<2 x s64>) = COPY %xmm0
- %1(<2 x s64>) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[VPMULLQZ128rr]]
+ ; CHECK: RET 0, implicit $xmm0
+ %0(<2 x s64>) = COPY $xmm0
+ %1(<2 x s64>) = COPY $xmm1
%2(<2 x s64>) = G_MUL %0, %1
- %xmm0 = COPY %2(<2 x s64>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<2 x s64>)
+ RET 0, implicit $xmm0
...
---
@@ -282,19 +282,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; CHECK-LABEL: name: test_mul_v16i16
- ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY %ymm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr256 = COPY %ymm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY $ymm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr256 = COPY $ymm1
; CHECK: [[VPMULLWYrr:%[0-9]+]]:vr256 = VPMULLWYrr [[COPY]], [[COPY1]]
- ; CHECK: %ymm0 = COPY [[VPMULLWYrr]]
- ; CHECK: RET 0, implicit %ymm0
- %0(<16 x s16>) = COPY %ymm0
- %1(<16 x s16>) = COPY %ymm1
+ ; CHECK: $ymm0 = COPY [[VPMULLWYrr]]
+ ; CHECK: RET 0, implicit $ymm0
+ %0(<16 x s16>) = COPY $ymm0
+ %1(<16 x s16>) = COPY $ymm1
%2(<16 x s16>) = G_MUL %0, %1
- %ymm0 = COPY %2(<16 x s16>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<16 x s16>)
+ RET 0, implicit $ymm0
...
---
@@ -308,19 +308,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; CHECK-LABEL: name: test_mul_v16i16_avx512bwvl
- ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY %ymm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY %ymm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY $ymm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
; CHECK: [[VPMULLWZ256rr:%[0-9]+]]:vr256x = VPMULLWZ256rr [[COPY]], [[COPY1]]
- ; CHECK: %ymm0 = COPY [[VPMULLWZ256rr]]
- ; CHECK: RET 0, implicit %ymm0
- %0(<16 x s16>) = COPY %ymm0
- %1(<16 x s16>) = COPY %ymm1
+ ; CHECK: $ymm0 = COPY [[VPMULLWZ256rr]]
+ ; CHECK: RET 0, implicit $ymm0
+ %0(<16 x s16>) = COPY $ymm0
+ %1(<16 x s16>) = COPY $ymm1
%2(<16 x s16>) = G_MUL %0, %1
- %ymm0 = COPY %2(<16 x s16>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<16 x s16>)
+ RET 0, implicit $ymm0
...
---
@@ -334,19 +334,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; CHECK-LABEL: name: test_mul_v8i32
- ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY %ymm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr256 = COPY %ymm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY $ymm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr256 = COPY $ymm1
; CHECK: [[VPMULLDYrr:%[0-9]+]]:vr256 = VPMULLDYrr [[COPY]], [[COPY1]]
- ; CHECK: %ymm0 = COPY [[VPMULLDYrr]]
- ; CHECK: RET 0, implicit %ymm0
- %0(<8 x s32>) = COPY %ymm0
- %1(<8 x s32>) = COPY %ymm1
+ ; CHECK: $ymm0 = COPY [[VPMULLDYrr]]
+ ; CHECK: RET 0, implicit $ymm0
+ %0(<8 x s32>) = COPY $ymm0
+ %1(<8 x s32>) = COPY $ymm1
%2(<8 x s32>) = G_MUL %0, %1
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -360,19 +360,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; CHECK-LABEL: name: test_mul_v8i32_avx512vl
- ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY %ymm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY %ymm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY $ymm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
; CHECK: [[VPMULLDZ256rr:%[0-9]+]]:vr256x = VPMULLDZ256rr [[COPY]], [[COPY1]]
- ; CHECK: %ymm0 = COPY [[VPMULLDZ256rr]]
- ; CHECK: RET 0, implicit %ymm0
- %0(<8 x s32>) = COPY %ymm0
- %1(<8 x s32>) = COPY %ymm1
+ ; CHECK: $ymm0 = COPY [[VPMULLDZ256rr]]
+ ; CHECK: RET 0, implicit $ymm0
+ %0(<8 x s32>) = COPY $ymm0
+ %1(<8 x s32>) = COPY $ymm1
%2(<8 x s32>) = G_MUL %0, %1
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -386,19 +386,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; CHECK-LABEL: name: test_mul_v4i64
- ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY %ymm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY %ymm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY $ymm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
; CHECK: [[VPMULLQZ256rr:%[0-9]+]]:vr256x = VPMULLQZ256rr [[COPY]], [[COPY1]]
- ; CHECK: %ymm0 = COPY [[VPMULLQZ256rr]]
- ; CHECK: RET 0, implicit %ymm0
- %0(<4 x s64>) = COPY %ymm0
- %1(<4 x s64>) = COPY %ymm1
+ ; CHECK: $ymm0 = COPY [[VPMULLQZ256rr]]
+ ; CHECK: RET 0, implicit $ymm0
+ %0(<4 x s64>) = COPY $ymm0
+ %1(<4 x s64>) = COPY $ymm1
%2(<4 x s64>) = G_MUL %0, %1
- %ymm0 = COPY %2(<4 x s64>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<4 x s64>)
+ RET 0, implicit $ymm0
...
---
@@ -412,19 +412,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; CHECK-LABEL: name: test_mul_v32i16
- ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; CHECK: [[VPMULLWZrr:%[0-9]+]]:vr512 = VPMULLWZrr [[COPY]], [[COPY1]]
- ; CHECK: %zmm0 = COPY [[VPMULLWZrr]]
- ; CHECK: RET 0, implicit %zmm0
- %0(<32 x s16>) = COPY %zmm0
- %1(<32 x s16>) = COPY %zmm1
+ ; CHECK: $zmm0 = COPY [[VPMULLWZrr]]
+ ; CHECK: RET 0, implicit $zmm0
+ %0(<32 x s16>) = COPY $zmm0
+ %1(<32 x s16>) = COPY $zmm1
%2(<32 x s16>) = G_MUL %0, %1
- %zmm0 = COPY %2(<32 x s16>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<32 x s16>)
+ RET 0, implicit $zmm0
...
---
@@ -438,19 +438,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; CHECK-LABEL: name: test_mul_v16i32
- ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; CHECK: [[VPMULLDZrr:%[0-9]+]]:vr512 = VPMULLDZrr [[COPY]], [[COPY1]]
- ; CHECK: %zmm0 = COPY [[VPMULLDZrr]]
- ; CHECK: RET 0, implicit %zmm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<16 x s32>) = COPY %zmm1
+ ; CHECK: $zmm0 = COPY [[VPMULLDZrr]]
+ ; CHECK: RET 0, implicit $zmm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<16 x s32>) = COPY $zmm1
%2(<16 x s32>) = G_MUL %0, %1
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $zmm0
...
---
@@ -464,18 +464,18 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; CHECK-LABEL: name: test_mul_v8i64
- ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; CHECK: [[VPMULLQZrr:%[0-9]+]]:vr512 = VPMULLQZrr [[COPY]], [[COPY1]]
- ; CHECK: %zmm0 = COPY [[VPMULLQZrr]]
- ; CHECK: RET 0, implicit %zmm0
- %0(<8 x s64>) = COPY %zmm0
- %1(<8 x s64>) = COPY %zmm1
+ ; CHECK: $zmm0 = COPY [[VPMULLQZrr]]
+ ; CHECK: RET 0, implicit $zmm0
+ %0(<8 x s64>) = COPY $zmm0
+ %1(<8 x s64>) = COPY $zmm1
%2(<8 x s64>) = G_MUL %0, %1
- %zmm0 = COPY %2(<8 x s64>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<8 x s64>)
+ RET 0, implicit $zmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-or-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-or-scalar.mir
index 21c6ed5..111c4f2 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-or-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-or-scalar.mir
@@ -38,19 +38,19 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_or_i8
- ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil
- ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY %sil
- ; ALL: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %al = COPY [[OR8rr]]
- ; ALL: RET 0, implicit %al
- %0(s8) = COPY %dil
- %1(s8) = COPY %sil
+ ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil
+ ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY $sil
+ ; ALL: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $al = COPY [[OR8rr]]
+ ; ALL: RET 0, implicit $al
+ %0(s8) = COPY $dil
+ %1(s8) = COPY $sil
%2(s8) = G_OR %0, %1
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -68,19 +68,19 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_or_i16
- ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY %di
- ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY %si
- ; ALL: [[OR16rr:%[0-9]+]]:gr16 = OR16rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %ax = COPY [[OR16rr]]
- ; ALL: RET 0, implicit %ax
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY $di
+ ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY $si
+ ; ALL: [[OR16rr:%[0-9]+]]:gr16 = OR16rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $ax = COPY [[OR16rr]]
+ ; ALL: RET 0, implicit $ax
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s16) = G_OR %0, %1
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -98,19 +98,19 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_or_i32
- ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; ALL: [[OR32rr:%[0-9]+]]:gr32 = OR32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %eax = COPY [[OR32rr]]
- ; ALL: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; ALL: [[OR32rr:%[0-9]+]]:gr32 = OR32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $eax = COPY [[OR32rr]]
+ ; ALL: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_OR %0, %1
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -128,18 +128,18 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; ALL-LABEL: name: test_or_i64
- ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; ALL: [[OR64rr:%[0-9]+]]:gr64 = OR64rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %rax = COPY [[OR64rr]]
- ; ALL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; ALL: [[OR64rr:%[0-9]+]]:gr64 = OR64rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $rax = COPY [[OR64rr]]
+ ; ALL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s64) = G_OR %0, %1
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-phi.mir b/llvm/test/CodeGen/X86/GlobalISel/select-phi.mir
index 5d21cdb..dabde6f 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-phi.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-phi.mir
@@ -119,31 +119,31 @@
; ALL-LABEL: name: test_i8
; ALL: bb.0.entry:
; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
- ; ALL: liveins: %edi, %edx, %esi
- ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+ ; ALL: liveins: $edi, $edx, $esi
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; ALL: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit
- ; ALL: [[COPY3:%[0-9]+]]:gr32 = COPY %edx
+ ; ALL: [[COPY3:%[0-9]+]]:gr32 = COPY $edx
; ALL: [[COPY4:%[0-9]+]]:gr8 = COPY [[COPY3]].sub_8bit
- ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags
- ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def %eflags
- ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit %eflags
- ; ALL: TEST8ri [[SETGr]], 1, implicit-def %eflags
- ; ALL: JNE_1 %bb.2, implicit %eflags
+ ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+ ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
+ ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
+ ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
+ ; ALL: JNE_1 %bb.2, implicit $eflags
; ALL: bb.1.cond.false:
; ALL: successors: %bb.2(0x80000000)
; ALL: bb.2.cond.end:
; ALL: [[PHI:%[0-9]+]]:gr8 = PHI [[COPY4]], %bb.1, [[COPY2]], %bb.0
- ; ALL: %al = COPY [[PHI]]
- ; ALL: RET 0, implicit %al
+ ; ALL: $al = COPY [[PHI]]
+ ; ALL: RET 0, implicit $al
bb.1.entry:
successors: %bb.3(0x40000000), %bb.2(0x40000000)
- liveins: %edi, %edx, %esi
+ liveins: $edi, $edx, $esi
- %0:gpr(s32) = COPY %edi
- %3:gpr(s32) = COPY %esi
+ %0:gpr(s32) = COPY $edi
+ %3:gpr(s32) = COPY $esi
%1:gpr(s8) = G_TRUNC %3(s32)
- %4:gpr(s32) = COPY %edx
+ %4:gpr(s32) = COPY $edx
%2:gpr(s8) = G_TRUNC %4(s32)
%5:gpr(s32) = G_CONSTANT i32 0
%6:gpr(s1) = G_ICMP intpred(sgt), %0(s32), %5
@@ -155,8 +155,8 @@
bb.3.cond.end:
%7:gpr(s8) = G_PHI %2(s8), %bb.2, %1(s8), %bb.1
- %al = COPY %7(s8)
- RET 0, implicit %al
+ $al = COPY %7(s8)
+ RET 0, implicit $al
...
---
@@ -178,31 +178,31 @@
; ALL-LABEL: name: test_i16
; ALL: bb.0.entry:
; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
- ; ALL: liveins: %edi, %edx, %esi
- ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+ ; ALL: liveins: $edi, $edx, $esi
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; ALL: [[COPY2:%[0-9]+]]:gr16 = COPY [[COPY1]].sub_16bit
- ; ALL: [[COPY3:%[0-9]+]]:gr32 = COPY %edx
+ ; ALL: [[COPY3:%[0-9]+]]:gr32 = COPY $edx
; ALL: [[COPY4:%[0-9]+]]:gr16 = COPY [[COPY3]].sub_16bit
- ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags
- ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def %eflags
- ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit %eflags
- ; ALL: TEST8ri [[SETGr]], 1, implicit-def %eflags
- ; ALL: JNE_1 %bb.2, implicit %eflags
+ ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+ ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
+ ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
+ ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
+ ; ALL: JNE_1 %bb.2, implicit $eflags
; ALL: bb.1.cond.false:
; ALL: successors: %bb.2(0x80000000)
; ALL: bb.2.cond.end:
; ALL: [[PHI:%[0-9]+]]:gr16 = PHI [[COPY4]], %bb.1, [[COPY2]], %bb.0
- ; ALL: %ax = COPY [[PHI]]
- ; ALL: RET 0, implicit %ax
+ ; ALL: $ax = COPY [[PHI]]
+ ; ALL: RET 0, implicit $ax
bb.1.entry:
successors: %bb.3(0x40000000), %bb.2(0x40000000)
- liveins: %edi, %edx, %esi
+ liveins: $edi, $edx, $esi
- %0:gpr(s32) = COPY %edi
- %3:gpr(s32) = COPY %esi
+ %0:gpr(s32) = COPY $edi
+ %3:gpr(s32) = COPY $esi
%1:gpr(s16) = G_TRUNC %3(s32)
- %4:gpr(s32) = COPY %edx
+ %4:gpr(s32) = COPY $edx
%2:gpr(s16) = G_TRUNC %4(s32)
%5:gpr(s32) = G_CONSTANT i32 0
%6:gpr(s1) = G_ICMP intpred(sgt), %0(s32), %5
@@ -214,8 +214,8 @@
bb.3.cond.end:
%7:gpr(s16) = G_PHI %2(s16), %bb.2, %1(s16), %bb.1
- %ax = COPY %7(s16)
- RET 0, implicit %ax
+ $ax = COPY %7(s16)
+ RET 0, implicit $ax
...
---
@@ -235,15 +235,15 @@
; ALL-LABEL: name: test_i32
; ALL: bb.0.entry:
; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; ALL: liveins: %edi, %edx, %esi
- ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; ALL: [[COPY2:%[0-9]+]]:gr32 = COPY %edx
- ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags
- ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def %eflags
- ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit %eflags
- ; ALL: TEST8ri [[SETGr]], 1, implicit-def %eflags
- ; ALL: JNE_1 %bb.1, implicit %eflags
+ ; ALL: liveins: $edi, $edx, $esi
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; ALL: [[COPY2:%[0-9]+]]:gr32 = COPY $edx
+ ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+ ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
+ ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
+ ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
+ ; ALL: JNE_1 %bb.1, implicit $eflags
; ALL: JMP_1 %bb.2
; ALL: bb.1.cond.true:
; ALL: successors: %bb.3(0x80000000)
@@ -252,15 +252,15 @@
; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
; ALL: [[PHI:%[0-9]+]]:gr32 = PHI [[COPY1]], %bb.1, [[COPY2]], %bb.2
- ; ALL: %eax = COPY [[PHI]]
- ; ALL: RET 0, implicit %eax
+ ; ALL: $eax = COPY [[PHI]]
+ ; ALL: RET 0, implicit $eax
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi, %edx, %esi
+ liveins: $edi, $edx, $esi
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
- %2(s32) = COPY %edx
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
+ %2(s32) = COPY $edx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.2
@@ -277,8 +277,8 @@
bb.4.cond.end:
%5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
- %eax = COPY %5(s32)
- RET 0, implicit %eax
+ $eax = COPY %5(s32)
+ RET 0, implicit $eax
...
---
@@ -298,15 +298,15 @@
; ALL-LABEL: name: test_i64
; ALL: bb.0.entry:
; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; ALL: liveins: %edi, %rdx, %rsi
- ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; ALL: [[COPY2:%[0-9]+]]:gr64 = COPY %rdx
- ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags
- ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def %eflags
- ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit %eflags
- ; ALL: TEST8ri [[SETGr]], 1, implicit-def %eflags
- ; ALL: JNE_1 %bb.1, implicit %eflags
+ ; ALL: liveins: $edi, $rdx, $rsi
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; ALL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdx
+ ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+ ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
+ ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
+ ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
+ ; ALL: JNE_1 %bb.1, implicit $eflags
; ALL: JMP_1 %bb.2
; ALL: bb.1.cond.true:
; ALL: successors: %bb.3(0x80000000)
@@ -315,15 +315,15 @@
; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
; ALL: [[PHI:%[0-9]+]]:gr64 = PHI [[COPY1]], %bb.1, [[COPY2]], %bb.2
- ; ALL: %rax = COPY [[PHI]]
- ; ALL: RET 0, implicit %rax
+ ; ALL: $rax = COPY [[PHI]]
+ ; ALL: RET 0, implicit $rax
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi, %rdx, %rsi
+ liveins: $edi, $rdx, $rsi
- %0(s32) = COPY %edi
- %1(s64) = COPY %rsi
- %2(s64) = COPY %rdx
+ %0(s32) = COPY $edi
+ %1(s64) = COPY $rsi
+ %2(s64) = COPY $rdx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.2
@@ -340,8 +340,8 @@
bb.4.cond.end:
%5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
- %rax = COPY %5(s64)
- RET 0, implicit %rax
+ $rax = COPY %5(s64)
+ RET 0, implicit $rax
...
---
@@ -371,16 +371,16 @@
constants:
# ALL-LABEL: bb.3.cond.end:
# ALL: %5:fr32 = PHI %1, %bb.1, %2, %bb.2
-# ALL-NEXT: %xmm0 = COPY %5
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %5
+# ALL-NEXT: RET 0, implicit $xmm0
body: |
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi, %xmm0, %xmm1
+ liveins: $edi, $xmm0, $xmm1
- %0(s32) = COPY %edi
- %1(s32) = COPY %xmm0
- %2(s32) = COPY %xmm1
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $xmm0
+ %2(s32) = COPY $xmm1
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.2
@@ -397,8 +397,8 @@
bb.4.cond.end:
%5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
- %xmm0 = COPY %5(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %5(s32)
+ RET 0, implicit $xmm0
...
---
@@ -424,16 +424,16 @@
- { id: 5, class: vecr, preferred-register: '' }
# ALL-LABEL: bb.3.cond.end:
# ALL: %5:fr64 = PHI %1, %bb.1, %2, %bb.2
-# ALL-NEXT: %xmm0 = COPY %5
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %5
+# ALL-NEXT: RET 0, implicit $xmm0
body: |
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi, %xmm0, %xmm1
+ liveins: $edi, $xmm0, $xmm1
- %0(s32) = COPY %edi
- %1(s64) = COPY %xmm0
- %2(s64) = COPY %xmm1
+ %0(s32) = COPY $edi
+ %1(s64) = COPY $xmm0
+ %2(s64) = COPY $xmm1
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.2
@@ -450,7 +450,7 @@
bb.4.cond.end:
%5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
- %xmm0 = COPY %5(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %5(s64)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-sub-v128.mir b/llvm/test/CodeGen/X86/GlobalISel/select-sub-v128.mir
index bb05007..57ba6b5 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-sub-v128.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-sub-v128.mir
@@ -44,13 +44,13 @@
# AVX512BWVL: %2:vr128x = VPSUBBZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<16 x s8>) = COPY %xmm0
- %1(<16 x s8>) = COPY %xmm1
+ %0(<16 x s8>) = COPY $xmm0
+ %1(<16 x s8>) = COPY $xmm1
%2(<16 x s8>) = G_SUB %0, %1
- %xmm0 = COPY %2(<16 x s8>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<16 x s8>)
+ RET 0, implicit $xmm0
...
---
@@ -72,13 +72,13 @@
# AVX512BWVL: %2:vr128x = VPSUBWZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<8 x s16>) = COPY %xmm0
- %1(<8 x s16>) = COPY %xmm1
+ %0(<8 x s16>) = COPY $xmm0
+ %1(<8 x s16>) = COPY $xmm1
%2(<8 x s16>) = G_SUB %0, %1
- %xmm0 = COPY %2(<8 x s16>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit $xmm0
...
---
@@ -100,13 +100,13 @@
# AVX512BWVL: %2:vr128x = VPSUBDZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_SUB %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -128,12 +128,12 @@
# AVX512BWVL: %2:vr128x = VPSUBQZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<2 x s64>) = COPY %xmm0
- %1(<2 x s64>) = COPY %xmm1
+ %0(<2 x s64>) = COPY $xmm0
+ %1(<2 x s64>) = COPY $xmm1
%2(<2 x s64>) = G_SUB %0, %1
- %xmm0 = COPY %2(<2 x s64>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<2 x s64>)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-sub-v256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-sub-v256.mir
index 614d131..812ac46 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-sub-v256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-sub-v256.mir
@@ -40,13 +40,13 @@
# AVX512BWVL: %2:vr256x = VPSUBBZ256rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<32 x s8>) = COPY %ymm0
- %1(<32 x s8>) = COPY %ymm1
+ %0(<32 x s8>) = COPY $ymm0
+ %1(<32 x s8>) = COPY $ymm1
%2(<32 x s8>) = G_SUB %0, %1
- %ymm0 = COPY %2(<32 x s8>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<32 x s8>)
+ RET 0, implicit $ymm0
...
---
@@ -66,13 +66,13 @@
# AVX512BWVL: %2:vr256x = VPSUBWZ256rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<16 x s16>) = COPY %ymm0
- %1(<16 x s16>) = COPY %ymm1
+ %0(<16 x s16>) = COPY $ymm0
+ %1(<16 x s16>) = COPY $ymm1
%2(<16 x s16>) = G_SUB %0, %1
- %ymm0 = COPY %2(<16 x s16>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<16 x s16>)
+ RET 0, implicit $ymm0
...
---
@@ -92,13 +92,13 @@
# AVX512BWVL: %2:vr256x = VPSUBDZ256rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<8 x s32>) = COPY %ymm0
- %1(<8 x s32>) = COPY %ymm1
+ %0(<8 x s32>) = COPY $ymm0
+ %1(<8 x s32>) = COPY $ymm1
%2(<8 x s32>) = G_SUB %0, %1
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -118,12 +118,12 @@
# AVX512BWVL: %2:vr256x = VPSUBQZ256rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<4 x s64>) = COPY %ymm0
- %1(<4 x s64>) = COPY %ymm1
+ %0(<4 x s64>) = COPY $ymm0
+ %1(<4 x s64>) = COPY $ymm1
%2(<4 x s64>) = G_SUB %0, %1
- %ymm0 = COPY %2(<4 x s64>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<4 x s64>)
+ RET 0, implicit $ymm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-sub-v512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-sub-v512.mir
index 6794921..b56c118 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-sub-v512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-sub-v512.mir
@@ -36,19 +36,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_sub_v64i8
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; ALL: [[VPSUBBZrr:%[0-9]+]]:vr512 = VPSUBBZrr [[COPY]], [[COPY1]]
- ; ALL: %zmm0 = COPY [[VPSUBBZrr]]
- ; ALL: RET 0, implicit %zmm0
- %0(<64 x s8>) = COPY %zmm0
- %1(<64 x s8>) = COPY %zmm1
+ ; ALL: $zmm0 = COPY [[VPSUBBZrr]]
+ ; ALL: RET 0, implicit $zmm0
+ %0(<64 x s8>) = COPY $zmm0
+ %1(<64 x s8>) = COPY $zmm1
%2(<64 x s8>) = G_SUB %0, %1
- %zmm0 = COPY %2(<64 x s8>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<64 x s8>)
+ RET 0, implicit $zmm0
...
---
@@ -62,19 +62,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_sub_v32i16
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; ALL: [[VPSUBWZrr:%[0-9]+]]:vr512 = VPSUBWZrr [[COPY]], [[COPY1]]
- ; ALL: %zmm0 = COPY [[VPSUBWZrr]]
- ; ALL: RET 0, implicit %zmm0
- %0(<32 x s16>) = COPY %zmm0
- %1(<32 x s16>) = COPY %zmm1
+ ; ALL: $zmm0 = COPY [[VPSUBWZrr]]
+ ; ALL: RET 0, implicit $zmm0
+ %0(<32 x s16>) = COPY $zmm0
+ %1(<32 x s16>) = COPY $zmm1
%2(<32 x s16>) = G_SUB %0, %1
- %zmm0 = COPY %2(<32 x s16>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<32 x s16>)
+ RET 0, implicit $zmm0
...
---
@@ -88,19 +88,19 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_sub_v16i32
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; ALL: [[VPSUBDZrr:%[0-9]+]]:vr512 = VPSUBDZrr [[COPY]], [[COPY1]]
- ; ALL: %zmm0 = COPY [[VPSUBDZrr]]
- ; ALL: RET 0, implicit %zmm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<16 x s32>) = COPY %zmm1
+ ; ALL: $zmm0 = COPY [[VPSUBDZrr]]
+ ; ALL: RET 0, implicit $zmm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<16 x s32>) = COPY $zmm1
%2(<16 x s32>) = G_SUB %0, %1
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $zmm0
...
---
@@ -114,18 +114,18 @@
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_sub_v8i64
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; ALL: [[VPSUBQZrr:%[0-9]+]]:vr512 = VPSUBQZrr [[COPY]], [[COPY1]]
- ; ALL: %zmm0 = COPY [[VPSUBQZrr]]
- ; ALL: RET 0, implicit %zmm0
- %0(<8 x s64>) = COPY %zmm0
- %1(<8 x s64>) = COPY %zmm1
+ ; ALL: $zmm0 = COPY [[VPSUBQZrr]]
+ ; ALL: RET 0, implicit $zmm0
+ %0(<8 x s64>) = COPY $zmm0
+ %1(<8 x s64>) = COPY $zmm1
%2(<8 x s64>) = G_SUB %0, %1
- %zmm0 = COPY %2(<8 x s64>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<8 x s64>)
+ RET 0, implicit $zmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-sub.mir b/llvm/test/CodeGen/X86/GlobalISel/select-sub.mir
index d2f99d1..64e0557 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-sub.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-sub.mir
@@ -33,17 +33,17 @@
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# ALL: %0:gr64 = COPY %rdi
-# ALL-NEXT: %1:gr64 = COPY %rsi
+# ALL: %0:gr64 = COPY $rdi
+# ALL-NEXT: %1:gr64 = COPY $rsi
# ALL-NEXT: %2:gr64 = SUB64rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s64) = G_SUB %0, %1
- %rax = COPY %2(s64)
+ $rax = COPY %2(s64)
...
@@ -55,17 +55,17 @@
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# ALL: %0:gr32 = COPY %edi
-# ALL-NEXT: %1:gr32 = COPY %esi
+# ALL: %0:gr32 = COPY $edi
+# ALL-NEXT: %1:gr32 = COPY $esi
# ALL-NEXT: %2:gr32 = SUB32rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_SUB %0, %1
- %eax = COPY %2(s32)
+ $eax = COPY %2(s32)
...
---
@@ -79,23 +79,23 @@
- { id: 0, class: vecr }
- { id: 1, class: vecr }
- { id: 2, class: vecr }
-# NO_AVX512VL: %0:vr128 = COPY %xmm0
-# AVX512VL: %0:vr128x = COPY %xmm0
-# NO_AVX512VL: %1:vr128 = COPY %xmm1
-# AVX512VL: %1:vr128x = COPY %xmm1
+# NO_AVX512VL: %0:vr128 = COPY $xmm0
+# AVX512VL: %0:vr128x = COPY $xmm0
+# NO_AVX512VL: %1:vr128 = COPY $xmm1
+# AVX512VL: %1:vr128x = COPY $xmm1
# SSE-NEXT: %2:vr128 = PSUBDrr %0, %1
# AVX-NEXT: %2:vr128 = VPSUBDrr %0, %1
# AVX512F-NEXT: %2:vr128 = VPSUBDrr %0, %1
# AVX512VL-NEXT: %2:vr128x = VPSUBDZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_SUB %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -109,23 +109,23 @@
- { id: 0, class: vecr }
- { id: 1, class: vecr }
- { id: 2, class: vecr }
-# NO_AVX512VL: %0:vr128 = COPY %xmm0
-# NO_AVX512VL: %1:vr128 = COPY %xmm1
+# NO_AVX512VL: %0:vr128 = COPY $xmm0
+# NO_AVX512VL: %1:vr128 = COPY $xmm1
# SSE-NEXT: %2:vr128 = SUBPSrr %0, %1
# AVX-NEXT: %2:vr128 = VSUBPSrr %0, %1
# AVX512F-NEXT: %2:vr128 = VSUBPSrr %0, %1
#
-# AVX512VL: %0:vr128x = COPY %xmm0
-# AVX512VL: %1:vr128x = COPY %xmm1
+# AVX512VL: %0:vr128x = COPY $xmm0
+# AVX512VL: %1:vr128x = COPY $xmm1
# AVX512VL-NEXT: %2:vr128x = VSUBPSZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_FSUB %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-trunc.mir b/llvm/test/CodeGen/X86/GlobalISel/select-trunc.mir
index d491297..aece6a3 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-trunc.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-trunc.mir
@@ -43,18 +43,18 @@
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: trunc_i32toi1
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
- ; CHECK: %al = COPY [[COPY1]]
- ; CHECK: RET 0, implicit %al
- %0(s32) = COPY %edi
+ ; CHECK: $al = COPY [[COPY1]]
+ ; CHECK: RET 0, implicit $al
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s8) = G_ANYEXT %1(s1)
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -67,17 +67,17 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: trunc_i32toi8
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
- ; CHECK: %al = COPY [[COPY1]]
- ; CHECK: RET 0, implicit %al
- %0(s32) = COPY %edi
+ ; CHECK: $al = COPY [[COPY1]]
+ ; CHECK: RET 0, implicit $al
+ %0(s32) = COPY $edi
%1(s8) = G_TRUNC %0(s32)
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -90,17 +90,17 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: trunc_i32toi16
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; CHECK: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
- ; CHECK: %ax = COPY [[COPY1]]
- ; CHECK: RET 0, implicit %ax
- %0(s32) = COPY %edi
+ ; CHECK: $ax = COPY [[COPY1]]
+ ; CHECK: RET 0, implicit $ax
+ %0(s32) = COPY $edi
%1(s16) = G_TRUNC %0(s32)
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -113,17 +113,17 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; CHECK-LABEL: name: trunc_i64toi8
- ; CHECK: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY %rdi
+ ; CHECK: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY $rdi
; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
- ; CHECK: %al = COPY [[COPY1]]
- ; CHECK: RET 0, implicit %al
- %0(s64) = COPY %rdi
+ ; CHECK: $al = COPY [[COPY1]]
+ ; CHECK: RET 0, implicit $al
+ %0(s64) = COPY $rdi
%1(s8) = G_TRUNC %0(s64)
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -136,17 +136,17 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; CHECK-LABEL: name: trunc_i64toi16
- ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; CHECK: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
- ; CHECK: %ax = COPY [[COPY1]]
- ; CHECK: RET 0, implicit %ax
- %0(s64) = COPY %rdi
+ ; CHECK: $ax = COPY [[COPY1]]
+ ; CHECK: RET 0, implicit $ax
+ %0(s64) = COPY $rdi
%1(s16) = G_TRUNC %0(s64)
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -159,16 +159,16 @@
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; CHECK-LABEL: name: trunc_i64toi32
- ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY [[COPY]].sub_32bit
- ; CHECK: %eax = COPY [[COPY1]]
- ; CHECK: RET 0, implicit %eax
- %0(s64) = COPY %rdi
+ ; CHECK: $eax = COPY [[COPY1]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s64) = COPY $rdi
%1(s32) = G_TRUNC %0(s64)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-undef.mir b/llvm/test/CodeGen/X86/GlobalISel/select-undef.mir
index 897ed85..92fa14e 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-undef.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-undef.mir
@@ -27,11 +27,11 @@
bb.1 (%ir-block.0):
; ALL-LABEL: name: test
; ALL: [[DEF:%[0-9]+]]:gr8 = IMPLICIT_DEF
- ; ALL: %al = COPY [[DEF]]
- ; ALL: RET 0, implicit %al
+ ; ALL: $al = COPY [[DEF]]
+ ; ALL: RET 0, implicit $al
%0(s8) = G_IMPLICIT_DEF
- %al = COPY %0(s8)
- RET 0, implicit %al
+ $al = COPY %0(s8)
+ RET 0, implicit $al
...
---
@@ -49,18 +49,18 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; ALL-LABEL: name: test2
- ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil
+ ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil
; ALL: [[DEF:%[0-9]+]]:gr8 = IMPLICIT_DEF
- ; ALL: [[ADD8rr:%[0-9]+]]:gr8 = ADD8rr [[COPY]], [[DEF]], implicit-def %eflags
- ; ALL: %al = COPY [[ADD8rr]]
- ; ALL: RET 0, implicit %al
- %0(s8) = COPY %dil
+ ; ALL: [[ADD8rr:%[0-9]+]]:gr8 = ADD8rr [[COPY]], [[DEF]], implicit-def $eflags
+ ; ALL: $al = COPY [[ADD8rr]]
+ ; ALL: RET 0, implicit $al
+ %0(s8) = COPY $dil
%1(s8) = G_IMPLICIT_DEF
%2(s8) = G_ADD %0, %1
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir
index 55a3428..eb20a8b 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir
@@ -26,21 +26,21 @@
; AVX: [[DEF:%[0-9]+]]:vr256 = IMPLICIT_DEF
; AVX: [[COPY:%[0-9]+]]:vr128 = COPY [[DEF]].sub_xmm
; AVX: [[VEXTRACTF128rr:%[0-9]+]]:vr128 = VEXTRACTF128rr [[DEF]], 1
- ; AVX: %xmm0 = COPY [[COPY]]
- ; AVX: %xmm1 = COPY [[VEXTRACTF128rr]]
- ; AVX: RET 0, implicit %xmm0, implicit %xmm1
+ ; AVX: $xmm0 = COPY [[COPY]]
+ ; AVX: $xmm1 = COPY [[VEXTRACTF128rr]]
+ ; AVX: RET 0, implicit $xmm0, implicit $xmm1
; AVX512VL-LABEL: name: test_unmerge
; AVX512VL: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF
; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY [[DEF]].sub_xmm
; AVX512VL: [[VEXTRACTF32x4Z256rr:%[0-9]+]]:vr128x = VEXTRACTF32x4Z256rr [[DEF]], 1
- ; AVX512VL: %xmm0 = COPY [[COPY]]
- ; AVX512VL: %xmm1 = COPY [[VEXTRACTF32x4Z256rr]]
- ; AVX512VL: RET 0, implicit %xmm0, implicit %xmm1
+ ; AVX512VL: $xmm0 = COPY [[COPY]]
+ ; AVX512VL: $xmm1 = COPY [[VEXTRACTF32x4Z256rr]]
+ ; AVX512VL: RET 0, implicit $xmm0, implicit $xmm1
%0(<8 x s32>) = IMPLICIT_DEF
%1(<4 x s32>), %2(<4 x s32>) = G_UNMERGE_VALUES %0(<8 x s32>)
- %xmm0 = COPY %1(<4 x s32>)
- %xmm1 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0, implicit %xmm1
+ $xmm0 = COPY %1(<4 x s32>)
+ $xmm1 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0, implicit $xmm1
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir
index 4446ab5..31eea5c 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir
@@ -30,12 +30,12 @@
; ALL: [[VEXTRACTF32x4Zrr:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrr [[DEF]], 1
; ALL: [[VEXTRACTF32x4Zrr1:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrr [[DEF]], 2
; ALL: [[VEXTRACTF32x4Zrr2:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrr [[DEF]], 3
- ; ALL: %xmm0 = COPY [[COPY]]
- ; ALL: RET 0, implicit %xmm0
+ ; ALL: $xmm0 = COPY [[COPY]]
+ ; ALL: RET 0, implicit $xmm0
%0(<16 x s32>) = IMPLICIT_DEF
%1(<4 x s32>), %2(<4 x s32>), %3(<4 x s32>), %4(<4 x s32>) = G_UNMERGE_VALUES %0(<16 x s32>)
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -54,11 +54,11 @@
; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
; ALL: [[COPY:%[0-9]+]]:vr256x = COPY [[DEF]].sub_ymm
; ALL: [[VEXTRACTF64x4Zrr:%[0-9]+]]:vr256x = VEXTRACTF64x4Zrr [[DEF]], 1
- ; ALL: %ymm0 = COPY [[COPY]]
- ; ALL: RET 0, implicit %ymm0
+ ; ALL: $ymm0 = COPY [[COPY]]
+ ; ALL: RET 0, implicit $ymm0
%0(<16 x s32>) = IMPLICIT_DEF
%1(<8 x s32>), %2(<8 x s32>) = G_UNMERGE_VALUES %0(<16 x s32>)
- %ymm0 = COPY %1(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm0
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-xor-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-xor-scalar.mir
index 26b07db..0604ce4 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-xor-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-xor-scalar.mir
@@ -38,19 +38,19 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_xor_i8
- ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil
- ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY %sil
- ; ALL: [[XOR8rr:%[0-9]+]]:gr8 = XOR8rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %al = COPY [[XOR8rr]]
- ; ALL: RET 0, implicit %al
- %0(s8) = COPY %dil
- %1(s8) = COPY %sil
+ ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil
+ ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY $sil
+ ; ALL: [[XOR8rr:%[0-9]+]]:gr8 = XOR8rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $al = COPY [[XOR8rr]]
+ ; ALL: RET 0, implicit $al
+ %0(s8) = COPY $dil
+ %1(s8) = COPY $sil
%2(s8) = G_XOR %0, %1
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -68,19 +68,19 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_xor_i16
- ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY %di
- ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY %si
- ; ALL: [[XOR16rr:%[0-9]+]]:gr16 = XOR16rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %ax = COPY [[XOR16rr]]
- ; ALL: RET 0, implicit %ax
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY $di
+ ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY $si
+ ; ALL: [[XOR16rr:%[0-9]+]]:gr16 = XOR16rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $ax = COPY [[XOR16rr]]
+ ; ALL: RET 0, implicit $ax
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s16) = G_XOR %0, %1
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -98,19 +98,19 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_xor_i32
- ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; ALL: [[XOR32rr:%[0-9]+]]:gr32 = XOR32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %eax = COPY [[XOR32rr]]
- ; ALL: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; ALL: [[XOR32rr:%[0-9]+]]:gr32 = XOR32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $eax = COPY [[XOR32rr]]
+ ; ALL: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_XOR %0, %1
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -128,18 +128,18 @@
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; ALL-LABEL: name: test_xor_i64
- ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; ALL: [[XOR64rr:%[0-9]+]]:gr64 = XOR64rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %rax = COPY [[XOR64rr]]
- ; ALL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; ALL: [[XOR64rr:%[0-9]+]]:gr64 = XOR64rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $rax = COPY [[XOR64rr]]
+ ; ALL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s64) = G_XOR %0, %1
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir b/llvm/test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir
index f719028..df6f35b 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir
@@ -24,11 +24,11 @@
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: allocai32
- ; CHECK: [[LEA64_32r:%[0-9]+]]:gr32 = LEA64_32r %stack.0.ptr1, 1, %noreg, 0, %noreg
- ; CHECK: %eax = COPY [[LEA64_32r]]
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: [[LEA64_32r:%[0-9]+]]:gr32 = LEA64_32r %stack.0.ptr1, 1, $noreg, 0, $noreg
+ ; CHECK: $eax = COPY [[LEA64_32r]]
+ ; CHECK: RET 0, implicit $eax
%0:gpr(p0) = G_FRAME_INDEX %stack.0.ptr1
- %eax = COPY %0(p0)
- RET 0, implicit %eax
+ $eax = COPY %0(p0)
+ RET 0, implicit $eax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir
index 24d83db..b6075eb 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir
@@ -19,12 +19,12 @@
registers:
- { id: 0, class: _, preferred-register: '' }
# ALL: %0:_(p0) = G_GLOBAL_VALUE @g_int
-# ALL-NEXT: %eax = COPY %0(p0)
-# ALL-NEXT: RET 0, implicit %rax
+# ALL-NEXT: $eax = COPY %0(p0)
+# ALL-NEXT: RET 0, implicit $rax
body: |
bb.1.entry:
%0(p0) = G_GLOBAL_VALUE @g_int
- %eax = COPY %0(p0)
- RET 0, implicit %rax
+ $eax = COPY %0(p0)
+ RET 0, implicit $rax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir
index 0c094c9..5824441 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir
@@ -24,11 +24,11 @@
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: allocai32
- ; CHECK: [[LEA32r:%[0-9]+]]:gr32 = LEA32r %stack.0.ptr1, 1, %noreg, 0, %noreg
- ; CHECK: %eax = COPY [[LEA32r]]
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: [[LEA32r:%[0-9]+]]:gr32 = LEA32r %stack.0.ptr1, 1, $noreg, 0, $noreg
+ ; CHECK: $eax = COPY [[LEA32r]]
+ ; CHECK: RET 0, implicit $eax
%0:gpr(p0) = G_FRAME_INDEX %stack.0.ptr1
- %eax = COPY %0(p0)
- RET 0, implicit %eax
+ $eax = COPY %0(p0)
+ RET 0, implicit $eax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir b/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir
index 8ba8fe2..21f76da 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir
@@ -19,12 +19,12 @@
registers:
- { id: 0, class: _, preferred-register: '' }
# ALL: %0:_(p0) = G_GLOBAL_VALUE @g_int
-# ALL-NEXT: %rax = COPY %0(p0)
-# ALL-NEXT: RET 0, implicit %rax
+# ALL-NEXT: $rax = COPY %0(p0)
+# ALL-NEXT: RET 0, implicit $rax
body: |
bb.1.entry:
%0(p0) = G_GLOBAL_VALUE @g_int
- %rax = COPY %0(p0)
- RET 0, implicit %rax
+ $rax = COPY %0(p0)
+ RET 0, implicit $rax
...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir b/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir
index a8ef4d3..aea3766 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir
@@ -24,11 +24,11 @@
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: allocai32
- ; CHECK: [[LEA64r:%[0-9]+]]:gr64 = LEA64r %stack.0.ptr1, 1, %noreg, 0, %noreg
- ; CHECK: %rax = COPY [[LEA64r]]
- ; CHECK: RET 0, implicit %rax
+ ; CHECK: [[LEA64r:%[0-9]+]]:gr64 = LEA64r %stack.0.ptr1, 1, $noreg, 0, $noreg
+ ; CHECK: $rax = COPY [[LEA64r]]
+ ; CHECK: RET 0, implicit $rax
%0:gpr(p0) = G_FRAME_INDEX %stack.0.ptr1
- %rax = COPY %0(p0)
- RET 0, implicit %rax
+ $rax = COPY %0(p0)
+ RET 0, implicit $rax
...
diff --git a/llvm/test/CodeGen/X86/add-i64.ll b/llvm/test/CodeGen/X86/add-i64.ll
index 4edc6aa..9b8bbfd 100644
--- a/llvm/test/CodeGen/X86/add-i64.ll
+++ b/llvm/test/CodeGen/X86/add-i64.ll
@@ -17,7 +17,7 @@
; X64-NEXT: movl %edi, %eax
; X64-NEXT: addq $63, %rax
; X64-NEXT: shrq $6, %rax
-; X64-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
; X64-NEXT: retq
%2 = zext i32 %0 to i64
%3 = add nuw nsw i64 %2, 63
diff --git a/llvm/test/CodeGen/X86/add-sub-nsw-nuw.ll b/llvm/test/CodeGen/X86/add-sub-nsw-nuw.ll
index 703860d..be6d20e 100644
--- a/llvm/test/CodeGen/X86/add-sub-nsw-nuw.ll
+++ b/llvm/test/CodeGen/X86/add-sub-nsw-nuw.ll
@@ -10,7 +10,7 @@
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: negl %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retl
entry:
%or = or i64 %argc, -4294967296
diff --git a/llvm/test/CodeGen/X86/add.ll b/llvm/test/CodeGen/X86/add.ll
index 3511bae..650b651 100644
--- a/llvm/test/CodeGen/X86/add.ll
+++ b/llvm/test/CodeGen/X86/add.ll
@@ -176,14 +176,14 @@
;
; X64-LINUX-LABEL: test6:
; X64-LINUX: # %bb.0: # %entry
-; X64-LINUX-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-LINUX-NEXT: # kill: def $esi killed $esi def $rsi
; X64-LINUX-NEXT: shlq $32, %rsi
; X64-LINUX-NEXT: leaq (%rsi,%rdi), %rax
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test6:
; X64-WIN32: # %bb.0: # %entry
-; X64-WIN32-NEXT: # kill: def %edx killed %edx def %rdx
+; X64-WIN32-NEXT: # kill: def $edx killed $edx def $rdx
; X64-WIN32-NEXT: shlq $32, %rdx
; X64-WIN32-NEXT: leaq (%rdx,%rcx), %rax
; X64-WIN32-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/addcarry.ll b/llvm/test/CodeGen/X86/addcarry.ll
index c9b3dbb..7bd0b9d 100644
--- a/llvm/test/CodeGen/X86/addcarry.ll
+++ b/llvm/test/CodeGen/X86/addcarry.ll
@@ -84,7 +84,7 @@
define i8 @e(i32* nocapture %a, i32 %b) nounwind {
; CHECK-LABEL: e:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
; CHECK-NEXT: movl (%rdi), %ecx
; CHECK-NEXT: leal (%rsi,%rcx), %edx
; CHECK-NEXT: addl %esi, %edx
diff --git a/llvm/test/CodeGen/X86/and-encoding.ll b/llvm/test/CodeGen/X86/and-encoding.ll
index 04d0d69..fe57842 100644
--- a/llvm/test/CodeGen/X86/and-encoding.ll
+++ b/llvm/test/CodeGen/X86/and-encoding.ll
@@ -110,7 +110,7 @@
; CHECK-NEXT: imulq %rcx, %rax # encoding: [0x48,0x0f,0xaf,0xc1]
; CHECK-NEXT: shrq $36, %rax # encoding: [0x48,0xc1,0xe8,0x24]
; CHECK-NEXT: andl $-128, %eax # encoding: [0x83,0xe0,0x80]
-; CHECK-NEXT: # kill: def %eax killed %eax killed %rax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-NEXT: retq # encoding: [0xc3]
%div = udiv i32 %x, 17
%and = and i32 %div, 268435328
diff --git a/llvm/test/CodeGen/X86/anyext.ll b/llvm/test/CodeGen/X86/anyext.ll
index f0b5143..e1435d6 100644
--- a/llvm/test/CodeGen/X86/anyext.ll
+++ b/llvm/test/CodeGen/X86/anyext.ll
@@ -8,7 +8,7 @@
; X32-LABEL: foo:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: def %eax killed %eax def %ax
+; X32-NEXT: # kill: def $eax killed $eax def $ax
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: andl $1, %eax
@@ -17,7 +17,7 @@
; X64-LABEL: foo:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: def %eax killed %eax def %ax
+; X64-NEXT: # kill: def $eax killed $eax def $ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %al, %eax
; X64-NEXT: andl $1, %eax
@@ -35,7 +35,7 @@
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: divw {{[0-9]+}}(%esp)
-; X32-NEXT: # kill: def %ax killed %ax def %eax
+; X32-NEXT: # kill: def $ax killed $ax def $eax
; X32-NEXT: andl $1, %eax
; X32-NEXT: retl
;
@@ -44,7 +44,7 @@
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: movl %edi, %eax
; X64-NEXT: divw %si
-; X64-NEXT: # kill: def %ax killed %ax def %eax
+; X64-NEXT: # kill: def $ax killed $ax def $eax
; X64-NEXT: andl $1, %eax
; X64-NEXT: retq
%q = trunc i32 %p to i16
diff --git a/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll b/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
index fc2b567..b8b2b7f 100644
--- a/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
+++ b/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
@@ -93,7 +93,7 @@
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: shrq $63, %rax
-; CHECK-NEXT: # kill: def %al killed %al killed %rax
+; CHECK-NEXT: # kill: def $al killed $al killed $rax
; CHECK-NEXT: retq
entry:
%tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
diff --git a/llvm/test/CodeGen/X86/avx-cast.ll b/llvm/test/CodeGen/X86/avx-cast.ll
index 30ca0b0..868e61c 100644
--- a/llvm/test/CodeGen/X86/avx-cast.ll
+++ b/llvm/test/CodeGen/X86/avx-cast.ll
@@ -9,7 +9,7 @@
define <8 x float> @castA(<4 x float> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castA:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT: retq
@@ -20,7 +20,7 @@
define <4 x double> @castB(<2 x double> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castB:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT: retq
@@ -33,7 +33,7 @@
define <4 x i64> @castC(<2 x i64> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castC:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT: retq
@@ -47,7 +47,7 @@
define <4 x float> @castD(<8 x float> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castD:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%shuffle.i = shufflevector <8 x float> %m, <8 x float> %m, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -57,7 +57,7 @@
define <2 x i64> @castE(<4 x i64> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castE:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%shuffle.i = shufflevector <4 x i64> %m, <4 x i64> %m, <2 x i32> <i32 0, i32 1>
@@ -67,7 +67,7 @@
define <2 x double> @castF(<4 x double> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castF:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%shuffle.i = shufflevector <4 x double> %m, <4 x double> %m, <2 x i32> <i32 0, i32 1>
diff --git a/llvm/test/CodeGen/X86/avx-cmp.ll b/llvm/test/CodeGen/X86/avx-cmp.ll
index 968d8e3..2e15168 100644
--- a/llvm/test/CodeGen/X86/avx-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx-cmp.ll
@@ -197,7 +197,7 @@
; CHECK-NEXT: vcmpeqsd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovq %xmm0, %rax
; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax killed %rax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-NEXT: retq
%cmp29 = fcmp oeq double undef, 0.000000e+00
%res = zext i1 %cmp29 to i32
diff --git a/llvm/test/CodeGen/X86/avx-insertelt.ll b/llvm/test/CodeGen/X86/avx-insertelt.ll
index 3b19c0d..e828627 100644
--- a/llvm/test/CodeGen/X86/avx-insertelt.ll
+++ b/llvm/test/CodeGen/X86/avx-insertelt.ll
@@ -5,7 +5,7 @@
define <8 x float> @insert_f32(<8 x float> %y, float %f, <8 x float> %x) {
; ALL-LABEL: insert_f32:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; ALL-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
; ALL-NEXT: retq
%i0 = insertelement <8 x float> %y, float %f, i32 0
@@ -15,7 +15,7 @@
define <4 x double> @insert_f64(<4 x double> %y, double %f, <4 x double> %x) {
; ALL-LABEL: insert_f64:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; ALL-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; ALL-NEXT: retq
%i0 = insertelement <4 x double> %y, double %f, i32 0
diff --git a/llvm/test/CodeGen/X86/avx-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
index d86866e..0ad8272 100644
--- a/llvm/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
@@ -316,12 +316,12 @@
define <4 x double> @test_mm256_castpd128_pd256(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm256_castpd128_pd256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castpd128_pd256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> %a0, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
ret <4 x double> %res
@@ -330,13 +330,13 @@
define <2 x double> @test_mm256_castpd256_pd128(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_castpd256_pd128:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castpd256_pd128:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shufflevector <4 x double> %a0, <4 x double> %a0, <2 x i32> <i32 0, i32 1>
@@ -370,12 +370,12 @@
define <8 x float> @test_mm256_castps128_ps256(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm256_castps128_ps256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castps128_ps256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
ret <8 x float> %res
@@ -384,13 +384,13 @@
define <4 x float> @test_mm256_castps256_ps128(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_castps256_ps128:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castps256_ps128:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shufflevector <8 x float> %a0, <8 x float> %a0, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -400,12 +400,12 @@
define <4 x i64> @test_mm256_castsi128_si256(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_castsi128_si256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castsi128_si256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: retq
%res = shufflevector <2 x i64> %a0, <2 x i64> %a0, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
ret <4 x i64> %res
@@ -438,13 +438,13 @@
define <2 x i64> @test_mm256_castsi256_si128(<4 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_castsi256_si128:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castsi256_si128:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shufflevector <4 x i64> %a0, <4 x i64> %a0, <2 x i32> <i32 0, i32 1>
@@ -1043,13 +1043,13 @@
define <4 x double> @test_mm256_insertf128_pd(<4 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm256_insertf128_pd:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_insertf128_pd:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; X64-NEXT: retq
%ext = shufflevector <2 x double> %a1, <2 x double> %a1, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -1075,13 +1075,13 @@
define <4 x i64> @test_mm256_insertf128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_insertf128_si256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_insertf128_si256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; X64-NEXT: retq
%ext = shufflevector <2 x i64> %a1, <2 x i64> %a1, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -2188,13 +2188,13 @@
define <8 x float> @test_mm256_set_m128(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm256_set_m128:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_m128:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
%res = shufflevector <4 x float> %a1, <4 x float> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -2204,13 +2204,13 @@
define <4 x double> @test_mm256_set_m128d(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm256_set_m128d:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_m128d:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x double> %a0 to <4 x float>
@@ -2223,13 +2223,13 @@
define <4 x i64> @test_mm256_set_m128i(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_set_m128i:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_m128i:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x float>
@@ -2825,13 +2825,13 @@
define <8 x float> @test_mm256_setr_m128(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm256_setr_m128:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_m128:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -2841,13 +2841,13 @@
define <4 x double> @test_mm256_setr_m128d(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm256_setr_m128d:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_m128d:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x double> %a0 to <4 x float>
@@ -2860,13 +2860,13 @@
define <4 x i64> @test_mm256_setr_m128i(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_setr_m128i:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_m128i:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x float>
diff --git a/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll b/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
index 6eb0b95..1bf7bb3 100644
--- a/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
@@ -39,7 +39,7 @@
define <8 x i32> @test_x86_avx_vinsertf128_si_256_2(<8 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; CHECK-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 2)
@@ -88,7 +88,7 @@
define <2 x double> @test_x86_avx_extractf128_pd_256_2(<4 x double> %a0) {
; CHECK-LABEL: test_x86_avx_extractf128_pd_256_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 2)
diff --git a/llvm/test/CodeGen/X86/avx-load-store.ll b/llvm/test/CodeGen/X86/avx-load-store.ll
index 5a64db0..e82db75 100644
--- a/llvm/test/CodeGen/X86/avx-load-store.ll
+++ b/llvm/test/CodeGen/X86/avx-load-store.ll
@@ -85,7 +85,7 @@
; CHECK_O0-LABEL: mov00:
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK_O0-NEXT: # implicit-def: %ymm1
+; CHECK_O0-NEXT: # implicit-def: $ymm1
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
; CHECK_O0-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK_O0-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm2[1,2,3,4,5,6,7]
@@ -104,7 +104,7 @@
; CHECK_O0-LABEL: mov01:
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK_O0-NEXT: # implicit-def: %ymm1
+; CHECK_O0-NEXT: # implicit-def: $ymm1
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
; CHECK_O0-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK_O0-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm2[1,2,3]
@@ -121,7 +121,7 @@
;
; CHECK_O0-LABEL: storev16i16:
; CHECK_O0: # %bb.0:
-; CHECK_O0-NEXT: # implicit-def: %rax
+; CHECK_O0-NEXT: # implicit-def: $rax
; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax)
store <16 x i16> %a, <16 x i16>* undef, align 32
unreachable
@@ -135,7 +135,7 @@
;
; CHECK_O0-LABEL: storev16i16_01:
; CHECK_O0: # %bb.0:
-; CHECK_O0-NEXT: # implicit-def: %rax
+; CHECK_O0-NEXT: # implicit-def: $rax
; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax)
store <16 x i16> %a, <16 x i16>* undef, align 4
unreachable
@@ -148,7 +148,7 @@
;
; CHECK_O0-LABEL: storev32i8:
; CHECK_O0: # %bb.0:
-; CHECK_O0-NEXT: # implicit-def: %rax
+; CHECK_O0-NEXT: # implicit-def: $rax
; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax)
store <32 x i8> %a, <32 x i8>* undef, align 32
unreachable
@@ -162,7 +162,7 @@
;
; CHECK_O0-LABEL: storev32i8_01:
; CHECK_O0: # %bb.0:
-; CHECK_O0-NEXT: # implicit-def: %rax
+; CHECK_O0-NEXT: # implicit-def: $rax
; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax)
store <32 x i8> %a, <32 x i8>* undef, align 4
unreachable
@@ -179,7 +179,7 @@
;
; CHECK_O0-LABEL: double_save:
; CHECK_O0: # %bb.0:
-; CHECK_O0-NEXT: # implicit-def: %ymm2
+; CHECK_O0-NEXT: # implicit-def: $ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
@@ -211,13 +211,13 @@
;
; CHECK_O0-LABEL: f_f:
; CHECK_O0: # %bb.0: # %allocas
-; CHECK_O0-NEXT: # implicit-def: %al
+; CHECK_O0-NEXT: # implicit-def: $al
; CHECK_O0-NEXT: testb $1, %al
; CHECK_O0-NEXT: jne .LBB8_1
; CHECK_O0-NEXT: jmp .LBB8_2
; CHECK_O0-NEXT: .LBB8_1: # %cif_mask_all
; CHECK_O0-NEXT: .LBB8_2: # %cif_mask_mixed
-; CHECK_O0-NEXT: # implicit-def: %al
+; CHECK_O0-NEXT: # implicit-def: $al
; CHECK_O0-NEXT: testb $1, %al
; CHECK_O0-NEXT: jne .LBB8_3
; CHECK_O0-NEXT: jmp .LBB8_4
@@ -225,8 +225,8 @@
; CHECK_O0-NEXT: movl $-1, %eax
; CHECK_O0-NEXT: vmovd %eax, %xmm0
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
-; CHECK_O0-NEXT: # implicit-def: %rcx
-; CHECK_O0-NEXT: # implicit-def: %ymm2
+; CHECK_O0-NEXT: # implicit-def: $rcx
+; CHECK_O0-NEXT: # implicit-def: $ymm2
; CHECK_O0-NEXT: vmaskmovps %ymm2, %ymm1, (%rcx)
; CHECK_O0-NEXT: .LBB8_4: # %cif_mixed_test_any_check
allocas:
@@ -259,7 +259,7 @@
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovdqu (%rsi), %xmm0
; CHECK_O0-NEXT: vmovdqu 16(%rsi), %xmm1
-; CHECK_O0-NEXT: # implicit-def: %ymm2
+; CHECK_O0-NEXT: # implicit-def: $ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
@@ -304,7 +304,7 @@
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovdqa (%rsi), %xmm0
; CHECK_O0-NEXT: vmovdqa 16(%rsi), %xmm1
-; CHECK_O0-NEXT: # implicit-def: %ymm2
+; CHECK_O0-NEXT: # implicit-def: $ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
diff --git a/llvm/test/CodeGen/X86/avx-splat.ll b/llvm/test/CodeGen/X86/avx-splat.ll
index 9cd05a3..4738262 100644
--- a/llvm/test/CodeGen/X86/avx-splat.ll
+++ b/llvm/test/CodeGen/X86/avx-splat.ll
@@ -61,7 +61,7 @@
; CHECK: # %bb.0: # %for_exit499
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: # implicit-def: %ymm0
+; CHECK-NEXT: # implicit-def: $ymm0
; CHECK-NEXT: jne .LBB4_2
; CHECK-NEXT: # %bb.1: # %load.i1247
; CHECK-NEXT: pushq %rbp
diff --git a/llvm/test/CodeGen/X86/avx-vinsertf128.ll b/llvm/test/CodeGen/X86/avx-vinsertf128.ll
index 13b47c3..7615f65 100644
--- a/llvm/test/CodeGen/X86/avx-vinsertf128.ll
+++ b/llvm/test/CodeGen/X86/avx-vinsertf128.ll
@@ -75,7 +75,7 @@
define <4 x double> @insert_undef_pd(<4 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: insert_undef_pd:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; CHECK-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double> undef, <2 x double> %a1, i8 0)
@@ -86,7 +86,7 @@
define <8 x float> @insert_undef_ps(<8 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: insert_undef_ps:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; CHECK-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %a1, i8 0)
@@ -97,7 +97,7 @@
define <8 x i32> @insert_undef_si(<8 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: insert_undef_si:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; CHECK-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> undef, <4 x i32> %a1, i8 0)
diff --git a/llvm/test/CodeGen/X86/avx-vzeroupper.ll b/llvm/test/CodeGen/X86/avx-vzeroupper.ll
index e69a290..4e0b8aa 100644
--- a/llvm/test/CodeGen/X86/avx-vzeroupper.ll
+++ b/llvm/test/CodeGen/X86/avx-vzeroupper.ll
@@ -82,14 +82,14 @@
; VZ-LABEL: test02:
; VZ: # %bb.0:
; VZ-NEXT: vaddps %ymm1, %ymm0, %ymm0
-; VZ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; VZ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; VZ-NEXT: vzeroupper
; VZ-NEXT: jmp do_sse # TAILCALL
;
; NO-VZ-LABEL: test02:
; NO-VZ: # %bb.0:
; NO-VZ-NEXT: vaddps %ymm1, %ymm0, %ymm0
-; NO-VZ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; NO-VZ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; NO-VZ-NEXT: jmp do_sse # TAILCALL
%add.i = fadd <8 x float> %a, %b
%add.low = call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %add.i, i8 0)
@@ -222,10 +222,10 @@
; VZ-LABEL: test04:
; VZ: # %bb.0:
; VZ-NEXT: pushq %rax
-; VZ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; VZ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; VZ-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; VZ-NEXT: callq do_avx
-; VZ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; VZ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; VZ-NEXT: popq %rax
; VZ-NEXT: vzeroupper
; VZ-NEXT: retq
@@ -233,10 +233,10 @@
; NO-VZ-LABEL: test04:
; NO-VZ: # %bb.0:
; NO-VZ-NEXT: pushq %rax
-; NO-VZ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; NO-VZ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; NO-VZ-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; NO-VZ-NEXT: callq do_avx
-; NO-VZ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; NO-VZ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; NO-VZ-NEXT: popq %rax
; NO-VZ-NEXT: retq
%shuf = shufflevector <4 x float> %a, <4 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
diff --git a/llvm/test/CodeGen/X86/avx2-conversions.ll b/llvm/test/CodeGen/X86/avx2-conversions.ll
index b6f6490..d4c2330 100644
--- a/llvm/test/CodeGen/X86/avx2-conversions.ll
+++ b/llvm/test/CodeGen/X86/avx2-conversions.ll
@@ -9,7 +9,7 @@
; X32-SLOW: # %bb.0:
; X32-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X32-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-SLOW-NEXT: vzeroupper
; X32-SLOW-NEXT: retl
;
@@ -17,7 +17,7 @@
; X32-FAST: # %bb.0:
; X32-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; X32-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
-; X32-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-FAST-NEXT: vzeroupper
; X32-FAST-NEXT: retl
;
@@ -25,7 +25,7 @@
; X64-SLOW: # %bb.0:
; X64-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X64-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-SLOW-NEXT: vzeroupper
; X64-SLOW-NEXT: retq
;
@@ -33,7 +33,7 @@
; X64-FAST: # %bb.0:
; X64-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; X64-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
-; X64-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-FAST-NEXT: vzeroupper
; X64-FAST-NEXT: retq
%B = trunc <4 x i64> %A to <4 x i32>
@@ -45,7 +45,7 @@
; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -53,7 +53,7 @@
; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%B = trunc <8 x i32> %A to <8 x i16>
diff --git a/llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
index e0baf84..979d4ec 100644
--- a/llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
@@ -355,7 +355,7 @@
define <4 x i64> @test_mm256_broadcastsi128_si256(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_broadcastsi128_si256:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1447,7 +1447,7 @@
define <4 x i64> @test0_mm256_inserti128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind {
; CHECK-LABEL: test0_mm256_inserti128_si256:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; CHECK-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; CHECK-NEXT: ret{{[l|q]}}
%ext = shufflevector <2 x i64> %a1, <2 x i64> %a1, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
diff --git a/llvm/test/CodeGen/X86/avx2-masked-gather.ll b/llvm/test/CodeGen/X86/avx2-masked-gather.ll
index eb482c2..0058c8b 100644
--- a/llvm/test/CodeGen/X86/avx2-masked-gather.ll
+++ b/llvm/test/CodeGen/X86/avx2-masked-gather.ll
@@ -32,7 +32,7 @@
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %xmm2
+; NOGATHER-NEXT: # implicit-def: $xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB0_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -82,7 +82,7 @@
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %xmm2
+; NOGATHER-NEXT: # implicit-def: $xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB1_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -134,7 +134,7 @@
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %xmm2
+; NOGATHER-NEXT: # implicit-def: $xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB2_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -182,7 +182,7 @@
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %xmm2
+; NOGATHER-NEXT: # implicit-def: $xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB3_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -229,7 +229,7 @@
; NOGATHER-LABEL: masked_gather_v4i32:
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vpextrb $0, %xmm1, %eax
-; NOGATHER-NEXT: # implicit-def: %xmm3
+; NOGATHER-NEXT: # implicit-def: $xmm3
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB4_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -289,7 +289,7 @@
; NOGATHER-LABEL: masked_gather_v4float:
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vpextrb $0, %xmm1, %eax
-; NOGATHER-NEXT: # implicit-def: %xmm3
+; NOGATHER-NEXT: # implicit-def: $xmm3
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB5_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -360,7 +360,7 @@
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm4
; NOGATHER-NEXT: vmovdqa 32(%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %ymm2
+; NOGATHER-NEXT: # implicit-def: $ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -476,7 +476,7 @@
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm4
; NOGATHER-NEXT: vmovdqa 32(%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %ymm2
+; NOGATHER-NEXT: # implicit-def: $ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -587,7 +587,7 @@
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %ymm2
+; NOGATHER-NEXT: # implicit-def: $ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB8_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -662,7 +662,7 @@
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %ymm2
+; NOGATHER-NEXT: # implicit-def: $ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB9_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -735,7 +735,7 @@
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %xmm2
+; NOGATHER-NEXT: # implicit-def: $xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB10_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -782,7 +782,7 @@
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %xmm2
+; NOGATHER-NEXT: # implicit-def: $xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB11_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
diff --git a/llvm/test/CodeGen/X86/avx2-shift.ll b/llvm/test/CodeGen/X86/avx2-shift.ll
index 022c9f4..f4924b3 100644
--- a/llvm/test/CodeGen/X86/avx2-shift.ll
+++ b/llvm/test/CodeGen/X86/avx2-shift.ll
@@ -532,7 +532,7 @@
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -543,7 +543,7 @@
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shl <8 x i16> %lhs, %rhs
@@ -582,7 +582,7 @@
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -593,7 +593,7 @@
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = lshr <8 x i16> %lhs, %rhs
diff --git a/llvm/test/CodeGen/X86/avx2-vector-shifts.ll b/llvm/test/CodeGen/X86/avx2-vector-shifts.ll
index 321be3d..3dbaab0 100644
--- a/llvm/test/CodeGen/X86/avx2-vector-shifts.ll
+++ b/llvm/test/CodeGen/X86/avx2-vector-shifts.ll
@@ -431,7 +431,7 @@
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -442,7 +442,7 @@
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%shl = shl <8 x i16> %r, %a
@@ -639,7 +639,7 @@
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -650,7 +650,7 @@
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%lshr = lshr <8 x i16> %r, %a
diff --git a/llvm/test/CodeGen/X86/avx512-arith.ll b/llvm/test/CodeGen/X86/avx512-arith.ll
index 7c8a18a..f44e276 100644
--- a/llvm/test/CodeGen/X86/avx512-arith.ll
+++ b/llvm/test/CodeGen/X86/avx512-arith.ll
@@ -176,10 +176,10 @@
;
; AVX512DQ-LABEL: imulq256:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq256:
@@ -229,10 +229,10 @@
;
; AVX512DQ-LABEL: imulq128:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -708,7 +708,7 @@
define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-LABEL: test_mask_vminpd:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
+; AVX512F-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3
; AVX512F-NEXT: vptestmd %zmm3, %zmm3, %k1
; AVX512F-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512F-NEXT: retq
@@ -721,14 +721,14 @@
;
; AVX512BW-LABEL: test_mask_vminpd:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
+; AVX512BW-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3
; AVX512BW-NEXT: vptestmd %zmm3, %zmm3, %k1
; AVX512BW-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_mask_vminpd:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
+; AVX512DQ-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3
; AVX512DQ-NEXT: vptestmd %zmm3, %zmm3, %k1
; AVX512DQ-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512DQ-NEXT: retq
@@ -765,7 +765,7 @@
define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-LABEL: test_mask_vmaxpd:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
+; AVX512F-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3
; AVX512F-NEXT: vptestmd %zmm3, %zmm3, %k1
; AVX512F-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512F-NEXT: retq
@@ -778,14 +778,14 @@
;
; AVX512BW-LABEL: test_mask_vmaxpd:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
+; AVX512BW-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3
; AVX512BW-NEXT: vptestmd %zmm3, %zmm3, %k1
; AVX512BW-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_mask_vmaxpd:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
+; AVX512DQ-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3
; AVX512DQ-NEXT: vptestmd %zmm3, %zmm3, %k1
; AVX512DQ-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512DQ-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/avx512-build-vector.ll b/llvm/test/CodeGen/X86/avx512-build-vector.ll
index c7664b6..b001ebf 100644
--- a/llvm/test/CodeGen/X86/avx512-build-vector.ll
+++ b/llvm/test/CodeGen/X86/avx512-build-vector.ll
@@ -14,7 +14,7 @@
define <16 x float> @test3(<4 x float> %a) {
; CHECK-LABEL: test3:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,2,3,4,18,16,7,8,9,10,11,12,13,14,15]
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
diff --git a/llvm/test/CodeGen/X86/avx512-calling-conv.ll b/llvm/test/CodeGen/X86/avx512-calling-conv.ll
index 27c218c..448a040 100644
--- a/llvm/test/CodeGen/X86/avx512-calling-conv.ll
+++ b/llvm/test/CodeGen/X86/avx512-calling-conv.ll
@@ -67,7 +67,7 @@
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: callq _func8xi1
; KNL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; KNL-NEXT: vpslld $31, %ymm0, %ymm0
@@ -95,7 +95,7 @@
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; KNL_X32-NEXT: vpmovdw %zmm0, %ymm0
-; KNL_X32-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL_X32-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL_X32-NEXT: calll _func8xi1
; KNL_X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; KNL_X32-NEXT: vpslld $31, %ymm0, %ymm0
@@ -195,7 +195,7 @@
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: callq _func8xi1
; KNL-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; KNL-NEXT: popq %rax
@@ -219,7 +219,7 @@
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; KNL_X32-NEXT: vpmovdw %zmm0, %ymm0
-; KNL_X32-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL_X32-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL_X32-NEXT: calll _func8xi1
; KNL_X32-NEXT: vandps LCPI7_0, %xmm0, %xmm0
; KNL_X32-NEXT: addl $12, %esp
@@ -378,21 +378,21 @@
; KNL-LABEL: test13:
; KNL: ## %bb.0:
; KNL-NEXT: movzbl (%rdi), %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test13:
; SKX: ## %bb.0:
; SKX-NEXT: kmovb (%rdi), %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; KNL_X32-LABEL: test13:
; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_X32-NEXT: movzbl (%eax), %eax
-; KNL_X32-NEXT: ## kill: def %al killed %al killed %eax
+; KNL_X32-NEXT: ## kill: def $al killed $al killed $eax
; KNL_X32-NEXT: retl
%bar = load <1 x i1>, <1 x i1>* %foo
ret <1 x i1> %bar
diff --git a/llvm/test/CodeGen/X86/avx512-cmp-kor-sequence.ll b/llvm/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
index 29b9afe..5c90498 100644
--- a/llvm/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
+++ b/llvm/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
@@ -19,7 +19,7 @@
; CHECK-NEXT: korw %k2, %k1, %k1
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
entry:
%0 = tail call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %a, <16 x float> %x, i32 13, i16 -1, i32 4)
diff --git a/llvm/test/CodeGen/X86/avx512-cvt.ll b/llvm/test/CodeGen/X86/avx512-cvt.ll
index 5ad7892..29abb3a8 100644
--- a/llvm/test/CodeGen/X86/avx512-cvt.ll
+++ b/llvm/test/CodeGen/X86/avx512-cvt.ll
@@ -80,9 +80,9 @@
;
; AVX512DQ-LABEL: slto4f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
%b = sitofp <4 x i64> %a to <4 x double>
ret <4 x double> %b
@@ -105,9 +105,9 @@
;
; AVX512DQ-LABEL: slto2f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = sitofp <2 x i64> %a to <2 x double>
@@ -133,9 +133,9 @@
;
; AVX512DQ-LABEL: sltof2f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = sitofp <2 x i64> %a to <2 x float>
@@ -170,7 +170,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovups (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%a1 = load <4 x i64>, <4 x i64>* %a, align 8
@@ -204,9 +204,9 @@
;
; AVX512DQ-LABEL: f64to4sl:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
%b = fptosi <4 x double> %a to <4 x i64>
ret <4 x i64> %b
@@ -238,9 +238,9 @@
;
; AVX512DQ-LABEL: f32to4sl:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
%b = fptosi <4 x float> %a to <4 x i64>
ret <4 x i64> %b
@@ -272,9 +272,9 @@
;
; AVX512DQ-LABEL: slto4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = sitofp <4 x i64> %a to <4 x float>
@@ -307,9 +307,9 @@
;
; AVX512DQ-LABEL: ulto4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = uitofp <4 x i64> %a to <4 x float>
@@ -484,9 +484,9 @@
define <8 x i32> @f32to8ui(<8 x float> %a) nounwind {
; NOVL-LABEL: f32to8ui:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NOVL-NEXT: vcvttps2udq %zmm0, %zmm0
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: f32to8ui:
@@ -500,9 +500,9 @@
define <4 x i32> @f32to4ui(<4 x float> %a) nounwind {
; NOVL-LABEL: f32to4ui:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NOVL-NEXT: vcvttps2udq %zmm0, %zmm0
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -528,7 +528,7 @@
; NOVL: # %bb.0:
; NOVL-NEXT: vcvttpd2dq %zmm0, %ymm0
; NOVL-NEXT: vpmovdw %zmm0, %ymm0
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -547,7 +547,7 @@
; NOVL: # %bb.0:
; NOVL-NEXT: vcvttpd2dq %zmm0, %ymm0
; NOVL-NEXT: vpmovdw %zmm0, %ymm0
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -564,9 +564,9 @@
define <4 x i32> @f64to4ui(<4 x double> %a) nounwind {
; NOVL-LABEL: f64to4ui:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NOVL-NEXT: vcvttpd2udq %zmm0, %ymm0
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -705,7 +705,7 @@
; NOVL-NEXT: vptestmd %zmm1, %zmm1, %k1
; NOVL-NEXT: vcvtpd2ps %ymm0, %xmm0
; NOVL-NEXT: vmovaps %zmm0, %zmm0 {%k1} {z}
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -744,12 +744,12 @@
define <4 x double> @f32to4f64_mask(<4 x float> %b, <4 x double> %b1, <4 x double> %a1) {
; NOVL-LABEL: f32to4f64_mask:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; NOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NOVL-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; NOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; NOVL-NEXT: vcvtps2pd %xmm0, %ymm0
; NOVL-NEXT: vcmpltpd %zmm2, %zmm1, %k1
; NOVL-NEXT: vmovapd %zmm0, %zmm0 {%k1} {z}
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: f32to4f64_mask:
@@ -1291,9 +1291,9 @@
define <4 x double> @uito4f64(<4 x i32> %a) nounwind {
; NOVL-LABEL: uito4f64:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; NOVL-NEXT: vcvtudq2pd %ymm0, %zmm0
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: uito4f64:
@@ -1325,9 +1325,9 @@
define <8 x float> @uito8f32(<8 x i32> %a) nounwind {
; NOVL-LABEL: uito8f32:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NOVL-NEXT: vcvtudq2ps %zmm0, %zmm0
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: uito8f32:
@@ -1341,9 +1341,9 @@
define <4 x float> @uito4f32(<4 x i32> %a) nounwind {
; NOVL-LABEL: uito4f32:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NOVL-NEXT: vcvtudq2ps %zmm0, %zmm0
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -1556,7 +1556,7 @@
define <8 x float> @sbto8f32(<8 x float> %a) {
; NOVLDQ-LABEL: sbto8f32:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NOVLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vcmpltps %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1582,7 +1582,7 @@
;
; AVX512DQ-LABEL: sbto8f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
@@ -1596,7 +1596,7 @@
define <4 x float> @sbto4f32(<4 x float> %a) {
; NOVLDQ-LABEL: sbto4f32:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NOVLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vcmpltps %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1623,7 +1623,7 @@
;
; AVX512DQ-LABEL: sbto4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
@@ -1638,7 +1638,7 @@
define <4 x double> @sbto4f64(<4 x double> %a) {
; NOVLDQ-LABEL: sbto4f64:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NOVLDQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1664,7 +1664,7 @@
;
; AVX512DQ-LABEL: sbto4f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
@@ -1678,7 +1678,7 @@
define <2 x float> @sbto2f32(<2 x float> %a) {
; NOVLDQ-LABEL: sbto2f32:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NOVLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vcmpltps %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1705,7 +1705,7 @@
;
; AVX512DQ-LABEL: sbto2f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
@@ -1720,7 +1720,7 @@
define <2 x double> @sbto2f64(<2 x double> %a) {
; NOVLDQ-LABEL: sbto2f64:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NOVLDQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1747,7 +1747,7 @@
;
; AVX512DQ-LABEL: sbto2f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
@@ -1961,7 +1961,7 @@
define <8 x float> @ubto8f32(<8 x i32> %a) {
; NOVLDQ-LABEL: ubto8f32:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NOVLDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
@@ -1985,7 +1985,7 @@
;
; AVX512DQ-LABEL: ubto8f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vpmovd2m %zmm0, %k1
; AVX512DQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512DQ-NEXT: vcvtdq2ps %ymm0, %ymm0
@@ -1998,7 +1998,7 @@
define <8 x double> @ubto8f64(<8 x i32> %a) {
; NOVLDQ-LABEL: ubto8f64:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NOVLDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
@@ -2022,7 +2022,7 @@
;
; AVX512DQ-LABEL: ubto8f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vpmovd2m %zmm0, %k1
; AVX512DQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512DQ-NEXT: vcvtdq2pd %ymm0, %zmm0
@@ -2035,7 +2035,7 @@
define <4 x float> @ubto4f32(<4 x i32> %a) {
; NOVLDQ-LABEL: ubto4f32:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NOVLDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
@@ -2060,7 +2060,7 @@
;
; AVX512DQ-LABEL: ubto4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vpmovd2m %zmm0, %k1
; AVX512DQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512DQ-NEXT: vcvtdq2ps %xmm0, %xmm0
@@ -2074,7 +2074,7 @@
define <4 x double> @ubto4f64(<4 x i32> %a) {
; NOVLDQ-LABEL: ubto4f64:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NOVLDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
@@ -2098,7 +2098,7 @@
;
; AVX512DQ-LABEL: ubto4f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vpmovd2m %zmm0, %k1
; AVX512DQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512DQ-NEXT: vcvtdq2pd %xmm0, %ymm0
@@ -2140,7 +2140,7 @@
; NOVL-NEXT: vpcmpltuq %zmm1, %zmm0, %k1
; NOVL-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; NOVL-NEXT: vcvtudq2pd %ymm0, %zmm0
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -2160,7 +2160,7 @@
define <2 x i64> @test_2f64toub(<2 x double> %a, <2 x i64> %passthru) {
; KNL-LABEL: test_2f64toub:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; KNL-NEXT: vcvttsd2si %xmm0, %eax
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: kmovw %eax, %k0
@@ -2170,7 +2170,7 @@
; KNL-NEXT: kshiftlw $1, %k1, %k1
; KNL-NEXT: korw %k1, %k0, %k1
; KNL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -2184,7 +2184,7 @@
;
; AVX512DQ-LABEL: test_2f64toub:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512DQ-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512DQ-NEXT: vcvttsd2si %xmm2, %eax
; AVX512DQ-NEXT: kmovw %eax, %k0
@@ -2195,13 +2195,13 @@
; AVX512DQ-NEXT: kshiftrb $7, %k1, %k1
; AVX512DQ-NEXT: korb %k0, %k1, %k1
; AVX512DQ-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_2f64toub:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-NEXT: vcvttsd2si %xmm0, %eax
; AVX512BW-NEXT: andl $1, %eax
; AVX512BW-NEXT: kmovw %eax, %k0
@@ -2211,7 +2211,7 @@
; AVX512BW-NEXT: kshiftlw $1, %k1, %k1
; AVX512BW-NEXT: korw %k1, %k0, %k1
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%mask = fptoui <2 x double> %a to <2 x i1>
@@ -2222,12 +2222,12 @@
define <4 x i64> @test_4f64toub(<4 x double> %a, <4 x i64> %passthru) {
; NOVL-LABEL: test_4f64toub:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; NOVL-NEXT: vcvttpd2dq %ymm0, %xmm0
; NOVL-NEXT: vpslld $31, %xmm0, %xmm0
; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1
; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: test_4f64toub:
@@ -2266,12 +2266,12 @@
define <2 x i64> @test_2f32toub(<2 x float> %a, <2 x i64> %passthru) {
; NOVL-LABEL: test_2f32toub:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; NOVL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; NOVL-NEXT: vcvttps2dq %xmm0, %xmm0
; NOVL-NEXT: vpslld $31, %xmm0, %xmm0
; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1
; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -2290,12 +2290,12 @@
define <4 x i64> @test_4f32toub(<4 x float> %a, <4 x i64> %passthru) {
; NOVL-LABEL: test_4f32toub:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; NOVL-NEXT: vcvttps2dq %xmm0, %xmm0
; NOVL-NEXT: vpslld $31, %xmm0, %xmm0
; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1
; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: test_4f32toub:
@@ -2347,7 +2347,7 @@
define <2 x i64> @test_2f64tosb(<2 x double> %a, <2 x i64> %passthru) {
; KNL-LABEL: test_2f64tosb:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; KNL-NEXT: vcvttsd2si %xmm0, %eax
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: kmovw %eax, %k0
@@ -2357,7 +2357,7 @@
; KNL-NEXT: kshiftlw $1, %k1, %k1
; KNL-NEXT: korw %k1, %k0, %k1
; KNL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -2371,7 +2371,7 @@
;
; AVX512DQ-LABEL: test_2f64tosb:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512DQ-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512DQ-NEXT: vcvttsd2si %xmm2, %eax
; AVX512DQ-NEXT: kmovw %eax, %k0
@@ -2382,13 +2382,13 @@
; AVX512DQ-NEXT: kshiftrb $7, %k1, %k1
; AVX512DQ-NEXT: korb %k0, %k1, %k1
; AVX512DQ-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_2f64tosb:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-NEXT: vcvttsd2si %xmm0, %eax
; AVX512BW-NEXT: andl $1, %eax
; AVX512BW-NEXT: kmovw %eax, %k0
@@ -2398,7 +2398,7 @@
; AVX512BW-NEXT: kshiftlw $1, %k1, %k1
; AVX512BW-NEXT: korw %k1, %k0, %k1
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%mask = fptosi <2 x double> %a to <2 x i1>
@@ -2409,11 +2409,11 @@
define <4 x i64> @test_4f64tosb(<4 x double> %a, <4 x i64> %passthru) {
; NOVL-LABEL: test_4f64tosb:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; NOVL-NEXT: vcvttpd2dq %ymm0, %xmm0
; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1
; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: test_4f64tosb:
@@ -2449,11 +2449,11 @@
define <2 x i64> @test_2f32tosb(<2 x float> %a, <2 x i64> %passthru) {
; NOVL-LABEL: test_2f32tosb:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; NOVL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; NOVL-NEXT: vcvttps2dq %xmm0, %xmm0
; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1
; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -2471,11 +2471,11 @@
define <4 x i64> @test_4f32tosb(<4 x float> %a, <4 x i64> %passthru) {
; NOVL-LABEL: test_4f32tosb:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; NOVL-NEXT: vcvttps2dq %xmm0, %xmm0
; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1
; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: test_4f32tosb:
diff --git a/llvm/test/CodeGen/X86/avx512-ext.ll b/llvm/test/CodeGen/X86/avx512-ext.ll
index ea73bdb..130bba5 100644
--- a/llvm/test/CodeGen/X86/avx512-ext.ll
+++ b/llvm/test/CodeGen/X86/avx512-ext.ll
@@ -302,7 +302,7 @@
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x8mem_to_4x32:
@@ -324,7 +324,7 @@
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxbd (%rdi), %xmm0
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x8mem_to_4x32:
@@ -347,7 +347,7 @@
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x8mem_to_8x32:
@@ -370,7 +370,7 @@
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxbd (%rdi), %ymm0
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x8mem_to_8x32:
@@ -492,7 +492,7 @@
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_2x8mem_to_2x64:
@@ -513,7 +513,7 @@
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxbq (%rdi), %xmm0
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_2x8mem_to_2x64mask:
@@ -544,7 +544,7 @@
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x8mem_to_4x64:
@@ -566,7 +566,7 @@
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxbq (%rdi), %ymm0
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x8mem_to_4x64mask:
@@ -650,7 +650,7 @@
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x16mem_to_4x32:
@@ -672,7 +672,7 @@
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxwd (%rdi), %xmm0
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x16mem_to_4x32mask:
@@ -706,7 +706,7 @@
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x16mem_to_8x32:
@@ -729,7 +729,7 @@
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxwd (%rdi), %ymm0
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x16mem_to_8x32mask:
@@ -762,7 +762,7 @@
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x16_to_8x32mask:
@@ -872,7 +872,7 @@
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_2x16mem_to_2x64:
@@ -894,7 +894,7 @@
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxwq (%rdi), %xmm0
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_2x16mem_to_2x64mask:
@@ -926,7 +926,7 @@
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x16mem_to_4x64:
@@ -948,7 +948,7 @@
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxwq (%rdi), %ymm0
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x16mem_to_4x64mask:
@@ -1061,7 +1061,7 @@
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_2x32mem_to_2x64:
@@ -1083,7 +1083,7 @@
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxdq (%rdi), %xmm0
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_2x32mem_to_2x64mask:
@@ -1115,7 +1115,7 @@
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x32mem_to_4x64:
@@ -1137,7 +1137,7 @@
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxdq (%rdi), %ymm0
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x32mem_to_4x64mask:
@@ -1178,7 +1178,7 @@
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1
; KNL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x32_to_4x64mask:
@@ -1331,7 +1331,7 @@
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: # kill: def %ax killed %ax killed %eax
+; KNL-NEXT: # kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_16i8_to_16i1:
@@ -1339,7 +1339,7 @@
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
%mask_b = trunc <16 x i8>%a to <16 x i1>
%mask = bitcast <16 x i1> %mask_b to i16
@@ -1352,7 +1352,7 @@
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: # kill: def %ax killed %ax killed %eax
+; KNL-NEXT: # kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_16i32_to_16i1:
@@ -1360,7 +1360,7 @@
; SKX-NEXT: vpslld $31, %zmm0, %zmm0
; SKX-NEXT: vptestmd %zmm0, %zmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%mask_b = trunc <16 x i32>%a to <16 x i1>
@@ -1390,7 +1390,7 @@
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: # kill: def %al killed %al killed %eax
+; KNL-NEXT: # kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_8i16_to_8i1:
@@ -1398,7 +1398,7 @@
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq
%mask_b = trunc <8 x i16>%a to <8 x i1>
%mask = bitcast <8 x i1> %mask_b to i8
@@ -1410,7 +1410,7 @@
; KNL: # %bb.0:
; KNL-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; KNL-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8i1_8i32:
@@ -1436,7 +1436,7 @@
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: korw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: # kill: def %ax killed %ax killed %eax
+; KNL-NEXT: # kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_i32_to_i1:
@@ -1449,7 +1449,7 @@
; SKX-NEXT: kmovw %edi, %k1
; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
%a_i = trunc i32 %a to i1
%maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %a_i, i32 0
@@ -1462,7 +1462,7 @@
; KNL: # %bb.0:
; KNL-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8i1_8i16:
diff --git a/llvm/test/CodeGen/X86/avx512-extract-subvector.ll b/llvm/test/CodeGen/X86/avx512-extract-subvector.ll
index d0b6369..7a92cc1 100644
--- a/llvm/test/CodeGen/X86/avx512-extract-subvector.ll
+++ b/llvm/test/CodeGen/X86/avx512-extract-subvector.ll
@@ -15,7 +15,7 @@
define <8 x i16> @extract_subvector128_v32i16_first_element(<32 x i16> %x) nounwind {
; SKX-LABEL: extract_subvector128_v32i16_first_element:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; SKX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -35,7 +35,7 @@
define <16 x i8> @extract_subvector128_v64i8_first_element(<64 x i8> %x) nounwind {
; SKX-LABEL: extract_subvector128_v64i8_first_element:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; SKX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
diff --git a/llvm/test/CodeGen/X86/avx512-hadd-hsub.ll b/llvm/test/CodeGen/X86/avx512-hadd-hsub.ll
index d5bd7622..3ae8362 100644
--- a/llvm/test/CodeGen/X86/avx512-hadd-hsub.ll
+++ b/llvm/test/CodeGen/X86/avx512-hadd-hsub.ll
@@ -63,7 +63,7 @@
; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0
; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: fhadd_16:
@@ -72,7 +72,7 @@
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0
; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0
-; SKX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; SKX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x226 = shufflevector <16 x float> %x225, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -90,7 +90,7 @@
; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0
; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; KNL-NEXT: vsubps %zmm1, %zmm0, %zmm0
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: fhsub_16:
@@ -99,7 +99,7 @@
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0
; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SKX-NEXT: vsubps %zmm1, %zmm0, %zmm0
-; SKX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; SKX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x226 = shufflevector <16 x float> %x225, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -181,7 +181,7 @@
; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: fadd_noundef_low:
@@ -189,7 +189,7 @@
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0
-; SKX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; SKX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; SKX-NEXT: retq
%x226 = shufflevector <8 x double> %x225, <8 x double> %x227, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
%x228 = shufflevector <8 x double> %x225, <8 x double> %x227, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5 ,i32 13, i32 7, i32 15>
@@ -228,7 +228,7 @@
; KNL-NEXT: vshufps {{.*#+}} zmm2 = zmm0[0,2],zmm1[0,2],zmm0[4,6],zmm1[4,6],zmm0[8,10],zmm1[8,10],zmm0[12,14],zmm1[12,14]
; KNL-NEXT: vshufps {{.*#+}} zmm0 = zmm0[1,3],zmm1[1,3],zmm0[5,7],zmm1[5,7],zmm0[9,11],zmm1[9,11],zmm0[13,15],zmm1[13,15]
; KNL-NEXT: vpaddd %zmm0, %zmm2, %zmm0
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: hadd_16_3_sv:
@@ -236,7 +236,7 @@
; SKX-NEXT: vshufps {{.*#+}} zmm2 = zmm0[0,2],zmm1[0,2],zmm0[4,6],zmm1[4,6],zmm0[8,10],zmm1[8,10],zmm0[12,14],zmm1[12,14]
; SKX-NEXT: vshufps {{.*#+}} zmm0 = zmm0[1,3],zmm1[1,3],zmm0[5,7],zmm1[5,7],zmm0[9,11],zmm1[9,11],zmm0[13,15],zmm1[13,15]
; SKX-NEXT: vpaddd %zmm0, %zmm2, %zmm0
-; SKX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; SKX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; SKX-NEXT: retq
%x226 = shufflevector <16 x i32> %x225, <16 x i32> %x227, <16 x i32> <i32 0, i32 2, i32 16, i32 18
, i32 4, i32 6, i32 20, i32 22, i32 8, i32 10, i32 24, i32 26, i32 12, i32 14, i32 28, i32 30>
@@ -255,7 +255,7 @@
; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: fadd_noundef_eel:
@@ -263,7 +263,7 @@
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0
-; SKX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; SKX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x226 = shufflevector <8 x double> %x225, <8 x double> %x227, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
diff --git a/llvm/test/CodeGen/X86/avx512-insert-extract.ll b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
index b2e3c20..f414643 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -85,7 +85,7 @@
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -104,7 +104,7 @@
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -123,7 +123,7 @@
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -142,7 +142,7 @@
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: movl (%rsp,%rdi,4), %eax
@@ -231,7 +231,7 @@
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: korw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test13:
@@ -246,7 +246,7 @@
; SKX-NEXT: kmovw %eax, %k1
; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
%cmp_res = icmp ult i32 %a, %b
%maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %cmp_res, i32 0
@@ -309,7 +309,7 @@
; KNL-NEXT: kshiftrw $5, %k1, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test16:
@@ -322,7 +322,7 @@
; SKX-NEXT: kshiftrw $5, %k0, %k0
; SKX-NEXT: kxorw %k0, %k1, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
%x = load i1 , i1 * %addr, align 128
%a1 = bitcast i16 %a to <16 x i1>
@@ -343,7 +343,7 @@
; KNL-NEXT: kshiftrw $11, %k1, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test17:
@@ -356,7 +356,7 @@
; SKX-NEXT: kshiftrb $3, %k0, %k0
; SKX-NEXT: kxorb %k0, %k1, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
%x = load i1 , i1 * %addr, align 128
%a1 = bitcast i8 %a to <8 x i1>
@@ -451,7 +451,7 @@
; CHECK-NEXT: vpextrw $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrw $1, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%r1 = extractelement <32 x i16> %x, i32 1
@@ -466,7 +466,7 @@
; CHECK-NEXT: vpextrw $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrw $1, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%r1 = extractelement <16 x i16> %x, i32 1
@@ -480,7 +480,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpextrw $1, %xmm0, %eax
; CHECK-NEXT: vpextrw $3, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%r1 = extractelement <8 x i16> %x, i32 1
%r2 = extractelement <8 x i16> %x, i32 3
@@ -494,7 +494,7 @@
; CHECK-NEXT: vpextrb $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrb $1, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%r1 = extractelement <64 x i8> %x, i32 1
@@ -509,7 +509,7 @@
; CHECK-NEXT: vpextrb $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrb $1, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%r1 = extractelement <32 x i8> %x, i32 1
@@ -523,7 +523,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpextrb $1, %xmm0, %eax
; CHECK-NEXT: vpextrb $3, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%r1 = extractelement <16 x i8> %x, i32 1
%r2 = extractelement <16 x i8> %x, i32 3
@@ -825,8 +825,8 @@
define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y) {
; KNL-LABEL: test_iinsertelement_v4i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
; KNL-NEXT: vpcmpltud %zmm1, %zmm0, %k0
@@ -837,7 +837,7 @@
; KNL-NEXT: kshiftrw $13, %k1, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -853,7 +853,7 @@
; SKX-NEXT: kshiftrb $5, %k1, %k1
; SKX-NEXT: kxorb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
%cmp_res_i1 = icmp ult i32 %a, %b
%cmp_cmp_vec = icmp ult <4 x i32> %x, %y
@@ -866,8 +866,8 @@
define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y) {
; KNL-LABEL: test_iinsertelement_v2i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
; KNL-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
@@ -877,7 +877,7 @@
; KNL-NEXT: kshiftlw $1, %k1, %k1
; KNL-NEXT: korw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -892,7 +892,7 @@
; SKX-NEXT: kshiftlb $1, %k1, %k1
; SKX-NEXT: korb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
%cmp_res_i1 = icmp ult i32 %a, %b
%cmp_cmp_vec = icmp ult <2 x i64> %x, %y
@@ -905,8 +905,8 @@
define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
; KNL-LABEL: test_extractelement_v2i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpcmpnleuq %zmm1, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: andb $1, %al
@@ -934,8 +934,8 @@
define zeroext i8 @extractelement_v2i1_alt(<2 x i64> %a, <2 x i64> %b) {
; KNL-LABEL: extractelement_v2i1_alt:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpcmpnleuq %zmm1, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: andb $1, %al
@@ -964,8 +964,8 @@
define zeroext i8 @test_extractelement_v4i1(<4 x i32> %a, <4 x i32> %b) {
; KNL-LABEL: test_extractelement_v4i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; KNL-NEXT: kshiftrw $3, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -1091,7 +1091,7 @@
define i64 @test_extractelement_variable_v2i64(<2 x i64> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v2i64:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: movq -24(%rsp,%rdi,8), %rax
@@ -1110,7 +1110,7 @@
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: movq (%rsp,%rdi,8), %rax
@@ -1132,7 +1132,7 @@
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: movq (%rsp,%rdi,8), %rax
@@ -1147,7 +1147,7 @@
define double @test_extractelement_variable_v2f64(<2 x double> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v2f64:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -1166,7 +1166,7 @@
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -1188,7 +1188,7 @@
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -1203,7 +1203,7 @@
define i32 @test_extractelement_variable_v4i32(<4 x i32> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v4i32:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: movl -24(%rsp,%rdi,4), %eax
@@ -1222,7 +1222,7 @@
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: movl (%rsp,%rdi,4), %eax
@@ -1244,7 +1244,7 @@
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: movl (%rsp,%rdi,4), %eax
@@ -1259,7 +1259,7 @@
define float @test_extractelement_variable_v4f32(<4 x float> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v4f32:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -1278,7 +1278,7 @@
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -1300,7 +1300,7 @@
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -1315,7 +1315,7 @@
define i16 @test_extractelement_variable_v8i16(<8 x i16> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v8i16:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: movzwl -24(%rsp,%rdi,2), %eax
@@ -1334,7 +1334,7 @@
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: movzwl (%rsp,%rdi,2), %eax
@@ -1356,7 +1356,7 @@
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-64, %rsp
; KNL-NEXT: subq $128, %rsp
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
; KNL-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
; KNL-NEXT: vmovaps %ymm0, (%rsp)
; KNL-NEXT: andl $31, %edi
@@ -1375,7 +1375,7 @@
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vmovaps %zmm0, (%rsp)
; SKX-NEXT: andl $31, %edi
; SKX-NEXT: movzwl (%rsp,%rdi,2), %eax
@@ -1390,7 +1390,7 @@
define i8 @test_extractelement_variable_v16i8(<16 x i8> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v16i8:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: movb -24(%rsp,%rdi), %al
@@ -1409,7 +1409,7 @@
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $31, %edi
; CHECK-NEXT: movb (%rsp,%rdi), %al
@@ -1432,7 +1432,7 @@
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-64, %rsp
; KNL-NEXT: subq $128, %rsp
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
; KNL-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
; KNL-NEXT: vmovaps %ymm0, (%rsp)
; KNL-NEXT: andl $63, %edi
@@ -1451,7 +1451,7 @@
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vmovaps %zmm0, (%rsp)
; SKX-NEXT: andl $63, %edi
; SKX-NEXT: movb (%rsp,%rdi), %al
@@ -1512,9 +1512,9 @@
define zeroext i8 @test_extractelement_varible_v2i1(<2 x i64> %a, <2 x i64> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v2i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vextracti32x4 $0, %zmm0, -{{[0-9]+}}(%rsp)
@@ -1526,7 +1526,7 @@
;
; SKX-LABEL: test_extractelement_varible_v2i1:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleuq %xmm1, %xmm0, %k0
; SKX-NEXT: vpmovm2q %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -1543,9 +1543,9 @@
define zeroext i8 @test_extractelement_varible_v4i1(<4 x i32> %a, <4 x i32> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v4i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vextracti32x4 $0, %zmm0, -{{[0-9]+}}(%rsp)
@@ -1557,7 +1557,7 @@
;
; SKX-LABEL: test_extractelement_varible_v4i1:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleud %xmm1, %xmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -1574,9 +1574,9 @@
define zeroext i8 @test_extractelement_varible_v8i1(<8 x i32> %a, <8 x i32> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v8i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
-; KNL-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
+; KNL-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdw %zmm0, %ymm0
@@ -1589,7 +1589,7 @@
;
; SKX-LABEL: test_extractelement_varible_v8i1:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleud %ymm1, %ymm0, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -1607,7 +1607,7 @@
define zeroext i8 @test_extractelement_varible_v16i1(<16 x i32> %a, <16 x i32> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v16i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdb %zmm0, -{{[0-9]+}}(%rsp)
@@ -1619,7 +1619,7 @@
;
; SKX-LABEL: test_extractelement_varible_v16i1:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; SKX-NEXT: vpmovm2b %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -1644,7 +1644,7 @@
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-32, %rsp
; KNL-NEXT: subq $64, %rsp
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
; KNL-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm2, %ymm1, %ymm1
; KNL-NEXT: vpxor %ymm2, %ymm0, %ymm0
@@ -1667,7 +1667,7 @@
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-32, %rsp
; SKX-NEXT: subq $64, %rsp
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleub %ymm1, %ymm0, %k0
; SKX-NEXT: vpmovm2b %k0, %ymm0
; SKX-NEXT: vmovdqa %ymm0, (%rsp)
@@ -1706,7 +1706,7 @@
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-32, %rsp
; KNL-NEXT: subq $64, %rsp
-; KNL-NEXT: ## kill: def %esi killed %esi def %rsi
+; KNL-NEXT: ## kill: def $esi killed $esi def $rsi
; KNL-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
@@ -1738,7 +1738,7 @@
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-32, %rsp
; SKX-NEXT: subq $64, %rsp
-; SKX-NEXT: ## kill: def %esi killed %esi def %rsi
+; SKX-NEXT: ## kill: def $esi killed $esi def $rsi
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; SKX-NEXT: vpcmpnleub %ymm1, %ymm0, %k0
; SKX-NEXT: andl $31, %esi
@@ -1770,7 +1770,7 @@
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-64, %rsp
; KNL-NEXT: subq $128, %rsp
-; KNL-NEXT: ## kill: def %esi killed %esi def %rsi
+; KNL-NEXT: ## kill: def $esi killed $esi def $rsi
; KNL-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm2, %ymm0, %ymm0
; KNL-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
@@ -1821,7 +1821,7 @@
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: def %esi killed %esi def %rsi
+; SKX-NEXT: ## kill: def $esi killed $esi def $rsi
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: andl $63, %esi
@@ -2172,7 +2172,7 @@
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-128, %rsp
; KNL-NEXT: subq $256, %rsp ## imm = 0x100
-; KNL-NEXT: ## kill: def %esi killed %esi def %rsi
+; KNL-NEXT: ## kill: def $esi killed $esi def $rsi
; KNL-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm4, %ymm0, %ymm0
; KNL-NEXT: vpcmpgtb %ymm4, %ymm0, %ymm0
@@ -2255,7 +2255,7 @@
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-128, %rsp
; SKX-NEXT: subq $256, %rsp ## imm = 0x100
-; SKX-NEXT: ## kill: def %esi killed %esi def %rsi
+; SKX-NEXT: ## kill: def $esi killed $esi def $rsi
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpcmpnleub %zmm2, %zmm0, %k0
; SKX-NEXT: vpcmpnleub %zmm2, %zmm1, %k1
diff --git a/llvm/test/CodeGen/X86/avx512-insert-extract_i1.ll b/llvm/test/CodeGen/X86/avx512-insert-extract_i1.ll
index 9bfb472..a87a2a3 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract_i1.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract_i1.ll
@@ -13,7 +13,7 @@
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: vpmovm2b %k0, %zmm0
; SKX-NEXT: vmovdqa64 %zmm0, (%rsp)
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
index 11157bd..1fa5af6 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
@@ -10,7 +10,7 @@
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: kunpckbw %k0, %k1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.kunpck.bw(i16 %a0, i16 %a1)
ret i16 %res
@@ -559,7 +559,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32> %a, <16 x i32> %b, i16 -1)
ret i16 %res
@@ -571,7 +571,7 @@
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
ret i16 %res
@@ -584,7 +584,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64> %a, <8 x i64> %b, i8 -1)
ret i8 %res
@@ -596,7 +596,7 @@
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
ret i8 %res
@@ -609,7 +609,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32> %a, <16 x i32> %b, i16 -1)
ret i16 %res
@@ -621,7 +621,7 @@
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
ret i16 %res
@@ -634,7 +634,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64> %a, <8 x i64> %b, i8 -1)
ret i8 %res
@@ -646,7 +646,7 @@
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
ret i8 %res
@@ -3054,7 +3054,7 @@
define <16 x float>@test_int_x86_avx512_mask_insertf32x4_512(<16 x float> %x0, <4 x float> %x1, <16 x float> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_insertf32x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; CHECK-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
@@ -3075,7 +3075,7 @@
define <16 x i32>@test_int_x86_avx512_mask_inserti32x4_512(<16 x i32> %x0, <4 x i32> %x1, <16 x i32> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_inserti32x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; CHECK-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
@@ -3519,7 +3519,7 @@
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512(<4 x float> %x0, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; CHECK-NEXT: kmovw %edi, %k1
@@ -3553,7 +3553,7 @@
define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512(<4 x double> %x0, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm1 {%k1}
@@ -3587,7 +3587,7 @@
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512(<4 x i32> %x0, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; CHECK-NEXT: kmovw %edi, %k1
@@ -3622,7 +3622,7 @@
define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512(<4 x i64> %x0, <8 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm1 {%k1}
@@ -3692,7 +3692,7 @@
; CHECK-NEXT: vptestmq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 -1)
%res1 = call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 %m)
@@ -3710,7 +3710,7 @@
; CHECK-NEXT: vptestmd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.ptestm.d.512(<16 x i32> %a0, <16 x i32> %a1, i16 -1)
%res1 = call i16 @llvm.x86.avx512.ptestm.d.512(<16 x i32> %a0, <16 x i32> %a1, i16 %m)
@@ -3730,7 +3730,7 @@
; CHECK-NEXT: kmovw %k1, %ecx
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.ptestnm.d.512(<16 x i32> %x0, <16 x i32> %x1, i16 %x2)
%res1 = call i16 @llvm.x86.avx512.ptestnm.d.512(<16 x i32> %x0, <16 x i32> %x1, i16-1)
@@ -3749,7 +3749,7 @@
; CHECK-NEXT: kmovw %k1, %ecx
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.ptestnm.q.512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.q.512(<8 x i64> %x0, <8 x i64> %x1, i8-1)
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics.ll b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
index 5069b72..a8edf07 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
@@ -40,7 +40,7 @@
; CHECK-NEXT: kandw %k0, %k1, %k0
; CHECK-NEXT: kandw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kand.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kand.w(i16 %t1, i16 %a1)
@@ -58,7 +58,7 @@
; CHECK-NEXT: kandnw %k2, %k1, %k1
; CHECK-NEXT: kandnw %k0, %k1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kandn.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kandn.w(i16 %t1, i16 %a1)
@@ -72,7 +72,7 @@
; CHECK-NEXT: kmovw %edi, %k0
; CHECK-NEXT: knotw %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.knot.w(i16 %a0)
ret i16 %res
@@ -89,7 +89,7 @@
; CHECK-NEXT: korw %k0, %k1, %k0
; CHECK-NEXT: korw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kor.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kor.w(i16 %t1, i16 %a1)
@@ -109,7 +109,7 @@
; CHECK-NEXT: kxorw %k0, %k1, %k0
; CHECK-NEXT: kxorw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kxnor.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kxnor.w(i16 %t1, i16 %a1)
@@ -127,7 +127,7 @@
; CHECK-NEXT: kxorw %k0, %k1, %k0
; CHECK-NEXT: kxorw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kxor.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kxor.w(i16 %t1, i16 %a1)
@@ -803,7 +803,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %a, <16 x float> %b, i32 2, i16 -1, i32 8)
ret i16 %res
@@ -815,7 +815,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpneqpd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %a, <8 x double> %b, i32 4, i8 -1, i32 4)
ret i8 %res
@@ -3304,7 +3304,7 @@
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res4 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 5, i8 %x3, i32 8)
@@ -3326,7 +3326,7 @@
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: orl %edx, %eax
; CHECK-NEXT: orl %ecx, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res1 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 2, i8 -1, i32 4)
@@ -3348,7 +3348,7 @@
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpunordss %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 %x3, i32 4)
@@ -3371,7 +3371,7 @@
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: andl %edx, %eax
; CHECK-NEXT: andl %ecx, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res1 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 2, i8 -1, i32 4)
%res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 -1, i32 8)
diff --git a/llvm/test/CodeGen/X86/avx512-mask-op.ll b/llvm/test/CodeGen/X86/avx512-mask-op.ll
index b5ae7a2..064b38f8 100644
--- a/llvm/test/CodeGen/X86/avx512-mask-op.ll
+++ b/llvm/test/CodeGen/X86/avx512-mask-op.ll
@@ -11,7 +11,7 @@
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: knotw %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: mask16:
@@ -19,7 +19,7 @@
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: knotw %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mask16:
@@ -27,7 +27,7 @@
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: knotw %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mask16:
@@ -35,7 +35,7 @@
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: knotw %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = xor <16 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -84,7 +84,7 @@
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: knotw %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: mask8:
@@ -92,7 +92,7 @@
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: knotb %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mask8:
@@ -100,7 +100,7 @@
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: knotw %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mask8:
@@ -108,7 +108,7 @@
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: knotb %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -235,7 +235,7 @@
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: korw %k0, %k2, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: mand16_mem:
@@ -246,7 +246,7 @@
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: korw %k0, %k2, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mand16_mem:
@@ -257,7 +257,7 @@
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: korw %k0, %k2, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mand16_mem:
@@ -268,7 +268,7 @@
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: korw %k0, %k2, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: retq
%ma = load <16 x i1>, <16 x i1>* %x
%mb = load <16 x i1>, <16 x i1>* %y
@@ -285,7 +285,7 @@
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kshiftrw $8, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: shuf_test1:
@@ -293,7 +293,7 @@
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kshiftrw $8, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: shuf_test1:
@@ -301,7 +301,7 @@
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kshiftrw $8, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuf_test1:
@@ -309,7 +309,7 @@
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: kshiftrw $8, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: retq
%v1 = bitcast i16 %v to <16 x i1>
%mask = shufflevector <16 x i1> %v1, <16 x i1> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -366,7 +366,7 @@
; KNL-NEXT: kshiftrw $5, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -376,7 +376,7 @@
; SKX-NEXT: kshiftrw $5, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
@@ -386,7 +386,7 @@
; AVX512BW-NEXT: kshiftrw $5, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: andl $1, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -396,7 +396,7 @@
; AVX512DQ-NEXT: kshiftrw $5, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
; AVX512DQ-NEXT: andl $1, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%cmp_res = icmp ugt <16 x i32> %a, %b
@@ -412,7 +412,7 @@
; KNL-NEXT: kshiftrw $5, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: andb $1, %al
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -422,7 +422,7 @@
; SKX-NEXT: kshiftrw $5, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andb $1, %al
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
@@ -432,7 +432,7 @@
; AVX512BW-NEXT: kshiftrw $5, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: andb $1, %al
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -442,7 +442,7 @@
; AVX512DQ-NEXT: kshiftrw $5, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
; AVX512DQ-NEXT: andb $1, %al
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%cmp_res = icmp ugt <16 x i32> %a, %b
@@ -498,14 +498,14 @@
define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1) {
; KNL-LABEL: test4:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm3 killed %ymm3 def %zmm3
-; KNL-NEXT: ## kill: def %ymm2 killed %ymm2 def %zmm2
-; KNL-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm3 killed $ymm3 def $zmm3
+; KNL-NEXT: ## kill: def $ymm2 killed $ymm2 def $zmm2
+; KNL-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpcmpleq %zmm1, %zmm0, %k1
; KNL-NEXT: vpcmpgtq %zmm3, %zmm2, %k1 {%k1}
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -519,27 +519,27 @@
;
; AVX512BW-LABEL: test4:
; AVX512BW: ## %bb.0:
-; AVX512BW-NEXT: ## kill: def %ymm3 killed %ymm3 def %zmm3
-; AVX512BW-NEXT: ## kill: def %ymm2 killed %ymm2 def %zmm2
-; AVX512BW-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512BW-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: ## kill: def $ymm3 killed $ymm3 def $zmm3
+; AVX512BW-NEXT: ## kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512BW-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpcmpleq %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm2, %k1 {%k1}
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test4:
; AVX512DQ: ## %bb.0:
-; AVX512DQ-NEXT: ## kill: def %ymm3 killed %ymm3 def %zmm3
-; AVX512DQ-NEXT: ## kill: def %ymm2 killed %ymm2 def %zmm2
-; AVX512DQ-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512DQ-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: ## kill: def $ymm3 killed $ymm3 def $zmm3
+; AVX512DQ-NEXT: ## kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512DQ-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512DQ-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vpcmpleq %zmm1, %zmm0, %k1
; AVX512DQ-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1}
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%x_gt_y = icmp sgt <4 x i64> %x, %y
@@ -552,14 +552,14 @@
define <2 x i64> @test5(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64> %y1) {
; KNL-LABEL: test5:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %xmm3 killed %xmm3 def %zmm3
-; KNL-NEXT: ## kill: def %xmm2 killed %xmm2 def %zmm2
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $xmm3 killed $xmm3 def $zmm3
+; KNL-NEXT: ## kill: def $xmm2 killed $xmm2 def $zmm2
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpcmpleq %zmm3, %zmm2, %k1
; KNL-NEXT: vpcmpgtq %zmm0, %zmm1, %k1 {%k1}
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -572,27 +572,27 @@
;
; AVX512BW-LABEL: test5:
; AVX512BW: ## %bb.0:
-; AVX512BW-NEXT: ## kill: def %xmm3 killed %xmm3 def %zmm3
-; AVX512BW-NEXT: ## kill: def %xmm2 killed %xmm2 def %zmm2
-; AVX512BW-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm3 killed $xmm3 def $zmm3
+; AVX512BW-NEXT: ## kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512BW-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vpcmpleq %zmm3, %zmm2, %k1
; AVX512BW-NEXT: vpcmpgtq %zmm0, %zmm1, %k1 {%k1}
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test5:
; AVX512DQ: ## %bb.0:
-; AVX512DQ-NEXT: ## kill: def %xmm3 killed %xmm3 def %zmm3
-; AVX512DQ-NEXT: ## kill: def %xmm2 killed %xmm2 def %zmm2
-; AVX512DQ-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: ## kill: def $xmm3 killed $xmm3 def $zmm3
+; AVX512DQ-NEXT: ## kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512DQ-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vpcmpleq %zmm3, %zmm2, %k1
; AVX512DQ-NEXT: vpcmpgtq %zmm0, %zmm1, %k0 {%k1}
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%x_gt_y = icmp slt <2 x i64> %x, %y
@@ -713,13 +713,13 @@
; AVX512BW-NEXT: ## %bb.2:
; AVX512BW-NEXT: vpcmpltud %zmm2, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
; AVX512BW-NEXT: LBB17_1:
; AVX512BW-NEXT: vpcmpgtd %zmm2, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -789,7 +789,7 @@
; AVX512BW-NEXT: LBB18_3:
; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -831,7 +831,7 @@
; KNL-NEXT: LBB20_3:
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -861,7 +861,7 @@
; AVX512BW-NEXT: LBB20_3:
; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -877,7 +877,7 @@
; AVX512DQ-NEXT: LBB20_3:
; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%mask = icmp sgt i32 %a1, %b1
@@ -961,7 +961,7 @@
; AVX512BW-NEXT: cmovgw %ax, %cx
; AVX512BW-NEXT: kmovd %ecx, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1192,7 +1192,7 @@
; KNL-NEXT: korw %k0, %k1, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -1230,7 +1230,7 @@
; AVX512BW-NEXT: kshiftlw $7, %k0, %k0
; AVX512BW-NEXT: korw %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1251,7 +1251,7 @@
; AVX512DQ-NEXT: korb %k0, %k1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -1997,7 +1997,7 @@
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -2012,7 +2012,7 @@
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2020,7 +2020,7 @@
; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovb (%rdi), %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = load <2 x i1>, <2 x i1>* %a
@@ -2034,7 +2034,7 @@
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -2049,7 +2049,7 @@
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2057,7 +2057,7 @@
; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovb (%rdi), %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = load <4 x i1>, <4 x i1>* %a
@@ -2494,7 +2494,7 @@
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_add:
@@ -2503,7 +2503,7 @@
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_add:
@@ -2512,7 +2512,7 @@
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_add:
@@ -2521,7 +2521,7 @@
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -2537,7 +2537,7 @@
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_sub:
@@ -2546,7 +2546,7 @@
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_sub:
@@ -2555,7 +2555,7 @@
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_sub:
@@ -2564,7 +2564,7 @@
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -2580,7 +2580,7 @@
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kandw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_mul:
@@ -2589,7 +2589,7 @@
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kandw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_mul:
@@ -2598,7 +2598,7 @@
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kandw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_mul:
@@ -2607,7 +2607,7 @@
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kandw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -2623,7 +2623,7 @@
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_add:
@@ -2632,7 +2632,7 @@
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_add:
@@ -2641,7 +2641,7 @@
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_add:
@@ -2650,7 +2650,7 @@
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorb %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
@@ -2666,7 +2666,7 @@
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_sub:
@@ -2675,7 +2675,7 @@
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_sub:
@@ -2684,7 +2684,7 @@
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_sub:
@@ -2693,7 +2693,7 @@
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorb %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
@@ -2709,7 +2709,7 @@
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kandw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_mul:
@@ -2718,7 +2718,7 @@
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kandb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_mul:
@@ -2727,7 +2727,7 @@
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kandw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_mul:
@@ -2736,7 +2736,7 @@
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kandb %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
diff --git a/llvm/test/CodeGen/X86/avx512-memfold.ll b/llvm/test/CodeGen/X86/avx512-memfold.ll
index 02c5131..4e12950 100644
--- a/llvm/test/CodeGen/X86/avx512-memfold.ll
+++ b/llvm/test/CodeGen/X86/avx512-memfold.ll
@@ -7,7 +7,7 @@
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vcmpunordss (%rdi), %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%b.val = load float, float* %b
%bv0 = insertelement <4 x float> undef, float %b.val, i32 0
diff --git a/llvm/test/CodeGen/X86/avx512-regcall-Mask.ll b/llvm/test/CodeGen/X86/avx512-regcall-Mask.ll
index 3bd69ef..b4aa3ef 100644
--- a/llvm/test/CodeGen/X86/avx512-regcall-Mask.ll
+++ b/llvm/test/CodeGen/X86/avx512-regcall-Mask.ll
@@ -310,9 +310,9 @@
; X32-NEXT: vpmovm2b %k2, %zmm0
; X32-NEXT: vpmovm2b %k1, %zmm1
; X32-NEXT: vpmovm2b %k0, %zmm2
-; X32-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
-; X32-NEXT: # kill: def %ymm1 killed %ymm1 killed %zmm1
-; X32-NEXT: # kill: def %ymm2 killed %ymm2 killed %zmm2
+; X32-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; X32-NEXT: # kill: def $ymm1 killed $ymm1 killed $zmm1
+; X32-NEXT: # kill: def $ymm2 killed $ymm2 killed $zmm2
; X32-NEXT: calll _test_argv32i1helper
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
; X32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm5 # 16-byte Reload
@@ -340,9 +340,9 @@
; WIN64-NEXT: vpmovm2b %k2, %zmm0
; WIN64-NEXT: vpmovm2b %k1, %zmm1
; WIN64-NEXT: vpmovm2b %k0, %zmm2
-; WIN64-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
-; WIN64-NEXT: # kill: def %ymm1 killed %ymm1 killed %zmm1
-; WIN64-NEXT: # kill: def %ymm2 killed %ymm2 killed %zmm2
+; WIN64-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; WIN64-NEXT: # kill: def $ymm1 killed $ymm1 killed $zmm1
+; WIN64-NEXT: # kill: def $ymm2 killed $ymm2 killed $zmm2
; WIN64-NEXT: callq test_argv32i1helper
; WIN64-NEXT: nop
; WIN64-NEXT: addq $32, %rsp
@@ -384,9 +384,9 @@
; LINUXOSX64-NEXT: vpmovm2b %k2, %zmm0
; LINUXOSX64-NEXT: vpmovm2b %k1, %zmm1
; LINUXOSX64-NEXT: vpmovm2b %k0, %zmm2
-; LINUXOSX64-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
-; LINUXOSX64-NEXT: # kill: def %ymm1 killed %ymm1 killed %zmm1
-; LINUXOSX64-NEXT: # kill: def %ymm2 killed %ymm2 killed %zmm2
+; LINUXOSX64-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; LINUXOSX64-NEXT: # kill: def $ymm1 killed $ymm1 killed $zmm1
+; LINUXOSX64-NEXT: # kill: def $ymm2 killed $ymm2 killed $zmm2
; LINUXOSX64-NEXT: callq test_argv32i1helper
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm9 # 16-byte Reload
@@ -538,9 +538,9 @@
; X32-NEXT: vpmovm2b %k2, %zmm0
; X32-NEXT: vpmovm2b %k1, %zmm1
; X32-NEXT: vpmovm2b %k0, %zmm2
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; X32-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; X32-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; X32-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; X32-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; X32-NEXT: vzeroupper
; X32-NEXT: calll _test_argv16i1helper
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
@@ -568,9 +568,9 @@
; WIN64-NEXT: vpmovm2b %k2, %zmm0
; WIN64-NEXT: vpmovm2b %k1, %zmm1
; WIN64-NEXT: vpmovm2b %k0, %zmm2
-; WIN64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; WIN64-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; WIN64-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; WIN64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; WIN64-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; WIN64-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; WIN64-NEXT: vzeroupper
; WIN64-NEXT: callq test_argv16i1helper
; WIN64-NEXT: nop
@@ -612,9 +612,9 @@
; LINUXOSX64-NEXT: vpmovm2b %k2, %zmm0
; LINUXOSX64-NEXT: vpmovm2b %k1, %zmm1
; LINUXOSX64-NEXT: vpmovm2b %k0, %zmm2
-; LINUXOSX64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; LINUXOSX64-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; LINUXOSX64-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; LINUXOSX64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; LINUXOSX64-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; LINUXOSX64-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; LINUXOSX64-NEXT: vzeroupper
; LINUXOSX64-NEXT: callq test_argv16i1helper
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
@@ -705,9 +705,9 @@
; X32-LABEL: caller_retv16i1:
; X32: # %bb.0: # %entry
; X32-NEXT: calll _test_retv16i1
-; X32-NEXT: # kill: def %ax killed %ax def %eax
+; X32-NEXT: # kill: def $ax killed $ax def $eax
; X32-NEXT: incl %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; WIN64-LABEL: caller_retv16i1:
@@ -724,9 +724,9 @@
; WIN64-NEXT: .seh_savexmm 6, 0
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: callq test_retv16i1
-; WIN64-NEXT: # kill: def %ax killed %ax def %eax
+; WIN64-NEXT: # kill: def $ax killed $ax def $eax
; WIN64-NEXT: incl %eax
-; WIN64-NEXT: # kill: def %ax killed %ax killed %eax
+; WIN64-NEXT: # kill: def $ax killed $ax killed $eax
; WIN64-NEXT: vmovaps (%rsp), %xmm6 # 16-byte Reload
; WIN64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
; WIN64-NEXT: addq $40, %rsp
@@ -742,9 +742,9 @@
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: callq test_retv16i1
-; LINUXOSX64-NEXT: # kill: def %ax killed %ax def %eax
+; LINUXOSX64-NEXT: # kill: def $ax killed $ax def $eax
; LINUXOSX64-NEXT: incl %eax
-; LINUXOSX64-NEXT: # kill: def %ax killed %ax killed %eax
+; LINUXOSX64-NEXT: # kill: def $ax killed $ax killed $eax
; LINUXOSX64-NEXT: popq %rcx
; LINUXOSX64-NEXT: retq
entry:
@@ -771,9 +771,9 @@
; X32-NEXT: vpmovm2w %k2, %zmm0
; X32-NEXT: vpmovm2w %k1, %zmm1
; X32-NEXT: vpmovm2w %k0, %zmm2
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; X32-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; X32-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; X32-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; X32-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; X32-NEXT: vzeroupper
; X32-NEXT: calll _test_argv8i1helper
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
@@ -801,9 +801,9 @@
; WIN64-NEXT: vpmovm2w %k2, %zmm0
; WIN64-NEXT: vpmovm2w %k1, %zmm1
; WIN64-NEXT: vpmovm2w %k0, %zmm2
-; WIN64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; WIN64-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; WIN64-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; WIN64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; WIN64-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; WIN64-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; WIN64-NEXT: vzeroupper
; WIN64-NEXT: callq test_argv8i1helper
; WIN64-NEXT: nop
@@ -845,9 +845,9 @@
; LINUXOSX64-NEXT: vpmovm2w %k2, %zmm0
; LINUXOSX64-NEXT: vpmovm2w %k1, %zmm1
; LINUXOSX64-NEXT: vpmovm2w %k0, %zmm2
-; LINUXOSX64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; LINUXOSX64-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; LINUXOSX64-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; LINUXOSX64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; LINUXOSX64-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; LINUXOSX64-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; LINUXOSX64-NEXT: vzeroupper
; LINUXOSX64-NEXT: callq test_argv8i1helper
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
@@ -938,10 +938,10 @@
; X32-LABEL: caller_retv8i1:
; X32: # %bb.0: # %entry
; X32-NEXT: calll _test_retv8i1
-; X32-NEXT: # kill: def %al killed %al def %eax
+; X32-NEXT: # kill: def $al killed $al def $eax
; X32-NEXT: kmovd %eax, %k0
; X32-NEXT: vpmovm2w %k0, %zmm0
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -959,10 +959,10 @@
; WIN64-NEXT: .seh_savexmm 6, 0
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: callq test_retv8i1
-; WIN64-NEXT: # kill: def %al killed %al def %eax
+; WIN64-NEXT: # kill: def $al killed $al def $eax
; WIN64-NEXT: kmovd %eax, %k0
; WIN64-NEXT: vpmovm2w %k0, %zmm0
-; WIN64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; WIN64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; WIN64-NEXT: vmovaps (%rsp), %xmm6 # 16-byte Reload
; WIN64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
; WIN64-NEXT: addq $40, %rsp
@@ -979,10 +979,10 @@
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: callq test_retv8i1
-; LINUXOSX64-NEXT: # kill: def %al killed %al def %eax
+; LINUXOSX64-NEXT: # kill: def $al killed $al def $eax
; LINUXOSX64-NEXT: kmovd %eax, %k0
; LINUXOSX64-NEXT: vpmovm2w %k0, %zmm0
-; LINUXOSX64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; LINUXOSX64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; LINUXOSX64-NEXT: popq %rax
; LINUXOSX64-NEXT: vzeroupper
; LINUXOSX64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll b/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll
index 9096720..ebbf7b6 100644
--- a/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll
+++ b/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll
@@ -8,19 +8,19 @@
; X32-LABEL: test_argReti1:
; X32: # %bb.0:
; X32-NEXT: incb %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti1:
; WIN64: # %bb.0:
; WIN64-NEXT: incb %al
-; WIN64-NEXT: # kill: def %al killed %al killed %eax
+; WIN64-NEXT: # kill: def $al killed $al killed $eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti1:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incb %al
-; LINUXOSX64-NEXT: # kill: def %al killed %al killed %eax
+; LINUXOSX64-NEXT: # kill: def $al killed $al killed $eax
; LINUXOSX64-NEXT: retq
%add = add i1 %a, 1
ret i1 %add
@@ -75,19 +75,19 @@
; X32-LABEL: test_argReti8:
; X32: # %bb.0:
; X32-NEXT: incb %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti8:
; WIN64: # %bb.0:
; WIN64-NEXT: incb %al
-; WIN64-NEXT: # kill: def %al killed %al killed %eax
+; WIN64-NEXT: # kill: def $al killed $al killed $eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti8:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incb %al
-; LINUXOSX64-NEXT: # kill: def %al killed %al killed %eax
+; LINUXOSX64-NEXT: # kill: def $al killed $al killed $eax
; LINUXOSX64-NEXT: retq
%add = add i8 %a, 1
ret i8 %add
@@ -142,19 +142,19 @@
; X32-LABEL: test_argReti16:
; X32: # %bb.0:
; X32-NEXT: incl %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti16:
; WIN64: # %bb.0:
; WIN64-NEXT: incl %eax
-; WIN64-NEXT: # kill: def %ax killed %ax killed %eax
+; WIN64-NEXT: # kill: def $ax killed $ax killed $eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti16:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incl %eax
-; LINUXOSX64-NEXT: # kill: def %ax killed %ax killed %eax
+; LINUXOSX64-NEXT: # kill: def $ax killed $ax killed $eax
; LINUXOSX64-NEXT: retq
%add = add i16 %a, 1
ret i16 %add
@@ -167,9 +167,9 @@
; X32-NEXT: pushl %esp
; X32-NEXT: incl %eax
; X32-NEXT: calll _test_argReti16
-; X32-NEXT: # kill: def %ax killed %ax def %eax
+; X32-NEXT: # kill: def $ax killed $ax def $eax
; X32-NEXT: incl %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
@@ -180,9 +180,9 @@
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: incl %eax
; WIN64-NEXT: callq test_argReti16
-; WIN64-NEXT: # kill: def %ax killed %ax def %eax
+; WIN64-NEXT: # kill: def $ax killed $ax def $eax
; WIN64-NEXT: incl %eax
-; WIN64-NEXT: # kill: def %ax killed %ax killed %eax
+; WIN64-NEXT: # kill: def $ax killed $ax killed $eax
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
@@ -196,9 +196,9 @@
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: callq test_argReti16
-; LINUXOSX64-NEXT: # kill: def %ax killed %ax def %eax
+; LINUXOSX64-NEXT: # kill: def $ax killed $ax def $eax
; LINUXOSX64-NEXT: incl %eax
-; LINUXOSX64-NEXT: # kill: def %ax killed %ax killed %eax
+; LINUXOSX64-NEXT: # kill: def $ax killed $ax killed $eax
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: retq
%b = add i16 %a, 1
diff --git a/llvm/test/CodeGen/X86/avx512-schedule.ll b/llvm/test/CodeGen/X86/avx512-schedule.ll
index 1f9fa52..cb223c8 100755
--- a/llvm/test/CodeGen/X86/avx512-schedule.ll
+++ b/llvm/test/CodeGen/X86/avx512-schedule.ll
@@ -4281,7 +4281,7 @@
; GENERIC-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm0, %k0 # sched: [1:0.33]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_16i8_to_16i1:
@@ -4289,7 +4289,7 @@
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%mask_b = trunc <16 x i8>%a to <16 x i1>
%mask = bitcast <16 x i1> %mask_b to i16
@@ -4302,7 +4302,7 @@
; GENERIC-NEXT: vpslld $31, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: vptestmd %zmm0, %zmm0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: vzeroupper # sched: [100:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
@@ -4311,7 +4311,7 @@
; SKX-NEXT: vpslld $31, %zmm0, %zmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %zmm0, %zmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%mask_b = trunc <16 x i32>%a to <16 x i1>
@@ -4347,7 +4347,7 @@
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k0 # sched: [1:0.33]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_8i16_to_8i1:
@@ -4355,7 +4355,7 @@
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%mask_b = trunc <8 x i16>%a to <8 x i1>
%mask = bitcast <8 x i1> %mask_b to i8
@@ -4392,7 +4392,7 @@
; GENERIC-NEXT: kmovw %edi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: korw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_i32_to_i1:
@@ -4405,7 +4405,7 @@
; SKX-NEXT: kmovw %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: korw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%a_i = trunc i32 %a to i1
%maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %a_i, i32 0
@@ -6666,7 +6666,7 @@
; GENERIC-NEXT: kmovd %edi, %k0 # sched: [1:0.33]
; GENERIC-NEXT: knotw %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mask16:
@@ -6674,7 +6674,7 @@
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: knotw %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = xor <16 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -6709,7 +6709,7 @@
; GENERIC-NEXT: kmovd %edi, %k0 # sched: [1:0.33]
; GENERIC-NEXT: knotb %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mask8:
@@ -6717,7 +6717,7 @@
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: knotb %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -6826,7 +6826,7 @@
; GENERIC-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: korw %k0, %k2, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mand16_mem:
@@ -6837,7 +6837,7 @@
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: korw %k0, %k2, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%ma = load <16 x i1>, <16 x i1>* %x
%mb = load <16 x i1>, <16 x i1>* %y
@@ -6854,7 +6854,7 @@
; GENERIC-NEXT: kmovd %edi, %k0 # sched: [1:0.33]
; GENERIC-NEXT: kshiftrw $8, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: shuf_test1:
@@ -6862,7 +6862,7 @@
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: kshiftrw $8, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%v1 = bitcast i16 %v to <16 x i1>
%mask = shufflevector <16 x i1> %v1, <16 x i1> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -6901,7 +6901,7 @@
; GENERIC-NEXT: kshiftrw $5, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
; GENERIC-NEXT: andl $1, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: vzeroupper # sched: [100:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
@@ -6911,7 +6911,7 @@
; SKX-NEXT: kshiftrw $5, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
; SKX-NEXT: andl $1, %eax # sched: [1:0.25]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%cmp_res = icmp ugt <16 x i32> %a, %b
@@ -6927,7 +6927,7 @@
; GENERIC-NEXT: kshiftrw $5, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
; GENERIC-NEXT: andb $1, %al # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: vzeroupper # sched: [100:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
@@ -6937,7 +6937,7 @@
; SKX-NEXT: kshiftrw $5, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
; SKX-NEXT: andb $1, %al # sched: [1:0.25]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%cmp_res = icmp ugt <16 x i32> %a, %b
@@ -8027,7 +8027,7 @@
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_add:
@@ -8036,7 +8036,7 @@
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -8052,7 +8052,7 @@
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_sub:
@@ -8061,7 +8061,7 @@
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -8077,7 +8077,7 @@
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kandw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_mul:
@@ -8086,7 +8086,7 @@
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kandw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -8102,7 +8102,7 @@
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_add:
@@ -8111,7 +8111,7 @@
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
@@ -8127,7 +8127,7 @@
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_sub:
@@ -8136,7 +8136,7 @@
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
@@ -8152,7 +8152,7 @@
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kandb %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_mul:
@@ -8161,7 +8161,7 @@
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kandb %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
diff --git a/llvm/test/CodeGen/X86/avx512-select.ll b/llvm/test/CodeGen/X86/avx512-select.ll
index 6491863..42f2a8f 100644
--- a/llvm/test/CodeGen/X86/avx512-select.ll
+++ b/llvm/test/CodeGen/X86/avx512-select.ll
@@ -155,7 +155,7 @@
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: korw %k1, %k0, %k0
; X86-NEXT: kmovw %k0, %eax
-; X86-NEXT: # kill: def %al killed %al killed %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
;
; X64-LABEL: select05_mem:
@@ -166,7 +166,7 @@
; X64-NEXT: kmovw %eax, %k1
; X64-NEXT: korw %k1, %k0, %k0
; X64-NEXT: kmovw %k0, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%mask = load <8 x i1> , <8 x i1>* %m
%a = load <8 x i1> , <8 x i1>* %a.0
@@ -205,7 +205,7 @@
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: kandw %k1, %k0, %k0
; X86-NEXT: kmovw %k0, %eax
-; X86-NEXT: # kill: def %al killed %al killed %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
;
; X64-LABEL: select06_mem:
@@ -216,7 +216,7 @@
; X64-NEXT: kmovw %eax, %k1
; X64-NEXT: kandw %k1, %k0, %k0
; X64-NEXT: kmovw %k0, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%mask = load <8 x i1> , <8 x i1>* %m
%a = load <8 x i1> , <8 x i1>* %a.0
@@ -237,7 +237,7 @@
; X86-NEXT: kandw %k0, %k1, %k0
; X86-NEXT: korw %k2, %k0, %k0
; X86-NEXT: kmovw %k0, %eax
-; X86-NEXT: # kill: def %al killed %al killed %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
;
; X64-LABEL: select07:
@@ -249,7 +249,7 @@
; X64-NEXT: kandw %k0, %k1, %k0
; X64-NEXT: korw %k2, %k0, %k0
; X64-NEXT: kmovw %k0, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%mask = bitcast i8 %m to <8 x i1>
%a = bitcast i8 %a.0 to <8 x i1>
diff --git a/llvm/test/CodeGen/X86/avx512-shift.ll b/llvm/test/CodeGen/X86/avx512-shift.ll
index eb424a8..eda27c2 100644
--- a/llvm/test/CodeGen/X86/avx512-shift.ll
+++ b/llvm/test/CodeGen/X86/avx512-shift.ll
@@ -34,7 +34,7 @@
; KNL-NEXT: vpsrlq $1, %ymm0, %ymm0
; KNL-NEXT: vpsllq $12, %ymm0, %ymm0
; KNL-NEXT: vpsraq $12, %zmm0, %zmm0
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: shift_4_i64:
@@ -106,10 +106,10 @@
define <4 x i64> @variable_sra3(<4 x i64> %x, <4 x i64> %y) {
; KNL-LABEL: variable_sra3:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: variable_sra3:
@@ -127,7 +127,7 @@
; KNL-NEXT: vpmovsxwd %xmm0, %ymm0
; KNL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: variable_sra4:
diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
index decaec0..c4eeda3 100644
--- a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
+++ b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
@@ -789,7 +789,7 @@
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <16,17,5,1,14,14,13,17,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm1, %ymm2, %ymm0
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -911,7 +911,7 @@
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <19,1,5,31,9,12,17,9,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -1710,7 +1710,7 @@
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <13,0,0,6,u,u,u,u>
; CHECK-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i32>, <16 x i32>* %vp
@@ -3681,7 +3681,7 @@
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = <3,3,15,9,u,u,u,u>
; CHECK-NEXT: vpermi2ps %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x float>, <16 x float>* %vp
@@ -4565,7 +4565,7 @@
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovapd {{.*#+}} ymm0 = [1,6,3,6]
; CHECK-NEXT: vpermi2pd %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <8 x double>, <8 x double>* %vp
diff --git a/llvm/test/CodeGen/X86/avx512-trunc.ll b/llvm/test/CodeGen/X86/avx512-trunc.ll
index 14a05b0..99af100 100644
--- a/llvm/test/CodeGen/X86/avx512-trunc.ll
+++ b/llvm/test/CodeGen/X86/avx512-trunc.ll
@@ -57,9 +57,9 @@
define <4 x i8> @trunc_qb_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qb_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -75,7 +75,7 @@
define void @trunc_qb_256_mem(<4 x i64> %i, <4 x i8>* %res) #0 {
; KNL-LABEL: trunc_qb_256_mem:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; KNL-NEXT: vmovd %xmm0, (%rdi)
@@ -140,9 +140,9 @@
define <4 x i16> @trunc_qw_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qw_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -158,7 +158,7 @@
define void @trunc_qw_256_mem(<4 x i64> %i, <4 x i16>* %res) #0 {
; KNL-LABEL: trunc_qw_256_mem:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; KNL-NEXT: vmovq %xmm0, (%rdi)
@@ -223,9 +223,9 @@
define <4 x i32> @trunc_qd_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qd_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -241,7 +241,7 @@
define void @trunc_qd_256_mem(<4 x i64> %i, <4 x i32>* %res) #0 {
; KNL-LABEL: trunc_qd_256_mem:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vmovdqa %xmm0, (%rdi)
; KNL-NEXT: vzeroupper
@@ -305,9 +305,9 @@
define <8 x i8> @trunc_db_256(<8 x i32> %i) #0 {
; KNL-LABEL: trunc_db_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -323,7 +323,7 @@
define void @trunc_db_256_mem(<8 x i32> %i, <8 x i8>* %res) #0 {
; KNL-LABEL: trunc_db_256_mem:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; KNL-NEXT: vmovq %xmm0, (%rdi)
@@ -387,9 +387,9 @@
define <8 x i16> @trunc_dw_256(<8 x i32> %i) #0 {
; KNL-LABEL: trunc_dw_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -405,7 +405,7 @@
define void @trunc_dw_256_mem(<8 x i32> %i, <8 x i16>* %res) #0 {
; KNL-LABEL: trunc_dw_256_mem:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: vmovdqa %xmm0, (%rdi)
; KNL-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/avx512-vbroadcast.ll b/llvm/test/CodeGen/X86/avx512-vbroadcast.ll
index 9792c49..00e654d 100644
--- a/llvm/test/CodeGen/X86/avx512-vbroadcast.ll
+++ b/llvm/test/CodeGen/X86/avx512-vbroadcast.ll
@@ -120,7 +120,7 @@
define <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_mask:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
+; ALL-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
; ALL-NEXT: vptestmd %zmm2, %zmm2, %k1
; ALL-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1}
; ALL-NEXT: vmovapd %zmm1, %zmm0
@@ -135,7 +135,7 @@
define <8 x double> @_sd8xdouble_maskz(double %a, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_maskz:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; ALL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; ALL-NEXT: vptestmd %zmm1, %zmm1, %k1
; ALL-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
; ALL-NEXT: retq
@@ -160,7 +160,7 @@
define <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_mask_load:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; ALL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; ALL-NEXT: vptestmd %zmm1, %zmm1, %k1
; ALL-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1}
; ALL-NEXT: retq
@@ -175,7 +175,7 @@
define <8 x double> @_sd8xdouble_maskz_load(double* %a.ptr, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_maskz_load:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; ALL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; ALL-NEXT: vptestmd %zmm0, %zmm0, %k1
; ALL-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} {z}
; ALL-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
index b84d61b..dd2b8f4 100644
--- a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -73,12 +73,12 @@
define <4 x float> @test7(<4 x float> %a, <4 x float> %b) {
; AVX512-LABEL: test7:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vcmpltps %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -97,12 +97,12 @@
define <2 x double> @test8(<2 x double> %a, <2 x double> %b) {
; AVX512-LABEL: test8:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vcmpltpd %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -120,11 +120,11 @@
define <8 x i32> @test9(<8 x i32> %x, <8 x i32> %y) nounwind {
; AVX512-LABEL: test9:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; AVX512-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; SKX-LABEL: test9:
@@ -140,11 +140,11 @@
define <8 x float> @test10(<8 x float> %x, <8 x float> %y) nounwind {
; AVX512-LABEL: test10:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; SKX-LABEL: test10:
@@ -175,7 +175,7 @@
; KNL-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; KNL-NEXT: kunpckbw %k0, %k1, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -185,7 +185,7 @@
; AVX512BW-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -195,7 +195,7 @@
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; SKX-NEXT: kunpckbw %k0, %k1, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%res = icmp eq <16 x i64> %a, %b
@@ -503,7 +503,7 @@
; AVX512-NEXT: vpcmpgtq %zmm3, %zmm2, %k1
; AVX512-NEXT: kxnorw %k1, %k0, %k1
; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; SKX-LABEL: test28:
@@ -537,7 +537,7 @@
; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm2, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -559,11 +559,11 @@
define <4 x double> @test30(<4 x double> %x, <4 x double> %y) nounwind {
; AVX512-LABEL: test30:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; SKX-LABEL: test30:
@@ -580,12 +580,12 @@
define <2 x double> @test31(<2 x double> %x, <2 x double> %x1, <2 x double>* %yp) nounwind {
; AVX512-LABEL: test31:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vmovupd (%rdi), %xmm2
; AVX512-NEXT: vcmpltpd %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -604,12 +604,12 @@
define <4 x double> @test32(<4 x double> %x, <4 x double> %x1, <4 x double>* %yp) nounwind {
; AVX512-LABEL: test32:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vmovupd (%rdi), %ymm2
; AVX512-NEXT: vcmpltpd %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; SKX-LABEL: test32:
@@ -639,12 +639,12 @@
define <4 x float> @test34(<4 x float> %x, <4 x float> %x1, <4 x float>* %yp) nounwind {
; AVX512-LABEL: test34:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vmovups (%rdi), %xmm2
; AVX512-NEXT: vcmpltps %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -662,12 +662,12 @@
define <8 x float> @test35(<8 x float> %x, <8 x float> %x1, <8 x float>* %yp) nounwind {
; AVX512-LABEL: test35:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vmovups (%rdi), %ymm2
; AVX512-NEXT: vcmpltps %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; SKX-LABEL: test35:
@@ -713,12 +713,12 @@
define <4 x double> @test38(<4 x double> %x, <4 x double> %x1, double* %ptr) nounwind {
; AVX512-LABEL: test38:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vbroadcastsd (%rdi), %ymm2
; AVX512-NEXT: vcmpltpd %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; SKX-LABEL: test38:
@@ -739,12 +739,12 @@
define <2 x double> @test39(<2 x double> %x, <2 x double> %x1, double* %ptr) nounwind {
; AVX512-LABEL: test39:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
; AVX512-NEXT: vcmpltpd %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -783,12 +783,12 @@
define <8 x float> @test41(<8 x float> %x, <8 x float> %x1, float* %ptr) nounwind {
; AVX512-LABEL: test41:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vbroadcastss (%rdi), %ymm2
; AVX512-NEXT: vcmpltps %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; SKX-LABEL: test41:
@@ -809,12 +809,12 @@
define <4 x float> @test42(<4 x float> %x, <4 x float> %x1, float* %ptr) nounwind {
; AVX512-LABEL: test42:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vbroadcastss (%rdi), %xmm2
; AVX512-NEXT: vcmpltps %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -935,11 +935,11 @@
;
; AVX512BW-LABEL: test47:
; AVX512BW: ## %bb.0:
-; AVX512BW-NEXT: ## kill: def %xmm2 killed %xmm2 def %zmm2
-; AVX512BW-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512BW-NEXT: ## kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512BW-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-NEXT: vptestnmd %zmm0, %zmm0, %k1
; AVX512BW-NEXT: vpblendmb %zmm1, %zmm2, %zmm0 {%k1}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -965,11 +965,11 @@
;
; AVX512BW-LABEL: test48:
; AVX512BW: ## %bb.0:
-; AVX512BW-NEXT: ## kill: def %ymm2 killed %ymm2 def %zmm2
-; AVX512BW-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512BW-NEXT: ## kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512BW-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512BW-NEXT: vptestnmd %zmm0, %zmm0, %k1
; AVX512BW-NEXT: vpblendmw %zmm1, %zmm2, %zmm0 {%k1}
-; AVX512BW-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; SKX-LABEL: test48:
@@ -994,11 +994,11 @@
;
; AVX512BW-LABEL: test49:
; AVX512BW: ## %bb.0:
-; AVX512BW-NEXT: ## kill: def %xmm2 killed %xmm2 def %zmm2
-; AVX512BW-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512BW-NEXT: ## kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512BW-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-NEXT: vptestnmq %zmm0, %zmm0, %k1
; AVX512BW-NEXT: vpblendmw %zmm1, %zmm2, %zmm0 {%k1}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/avx512-vec3-crash.ll b/llvm/test/CodeGen/X86/avx512-vec3-crash.ll
index 34094e9..7b2513c 100644
--- a/llvm/test/CodeGen/X86/avx512-vec3-crash.ll
+++ b/llvm/test/CodeGen/X86/avx512-vec3-crash.ll
@@ -19,9 +19,9 @@
; CHECK-NEXT: vpextrb $0, %xmm0, %eax
; CHECK-NEXT: vpextrb $4, %xmm0, %edx
; CHECK-NEXT: vpextrb $8, %xmm0, %ecx
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
-; CHECK-NEXT: # kill: def %dl killed %dl killed %edx
-; CHECK-NEXT: # kill: def %cl killed %cl killed %ecx
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
+; CHECK-NEXT: # kill: def $dl killed $dl killed $edx
+; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
; CHECK-NEXT: retq
%cmp.i = icmp slt <3 x i8> %x, %a
%res = sext <3 x i1> %cmp.i to <3 x i8>
diff --git a/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
index ad83422..32891ca 100644
--- a/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
@@ -1967,7 +1967,7 @@
; AVX512F-32-NEXT: kmovd %edx, %k7
; AVX512F-32-NEXT: movl %ebp, %edx
; AVX512F-32-NEXT: shrl $24, %edx
-; AVX512F-32-NEXT: # kill: def %al killed %al killed %eax def %eax
+; AVX512F-32-NEXT: # kill: def $al killed $al killed $eax def $eax
; AVX512F-32-NEXT: shrb $7, %al
; AVX512F-32-NEXT: kshiftlq $63, %k4, %k4
; AVX512F-32-NEXT: kshiftrq $47, %k4, %k4
@@ -1982,7 +1982,7 @@
; AVX512F-32-NEXT: kshiftrq $18, %k4, %k3
; AVX512F-32-NEXT: kxorq %k6, %k3, %k6
; AVX512F-32-NEXT: kmovd %edx, %k3
-; AVX512F-32-NEXT: # kill: def %dl killed %dl killed %edx def %edx
+; AVX512F-32-NEXT: # kill: def $dl killed $dl killed $edx def $edx
; AVX512F-32-NEXT: andb $15, %dl
; AVX512F-32-NEXT: andb $2, %al
; AVX512F-32-NEXT: shrb %al
@@ -2232,7 +2232,7 @@
; AVX512F-32-NEXT: kmovd %ecx, %k5
; AVX512F-32-NEXT: movl %ebx, %edx
; AVX512F-32-NEXT: shrl $24, %edx
-; AVX512F-32-NEXT: # kill: def %al killed %al killed %eax def %eax
+; AVX512F-32-NEXT: # kill: def $al killed $al killed $eax def $eax
; AVX512F-32-NEXT: shrb $7, %al
; AVX512F-32-NEXT: kshiftlq $63, %k6, %k6
; AVX512F-32-NEXT: kshiftrq $15, %k6, %k6
@@ -2248,7 +2248,7 @@
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k7 # 8-byte Reload
; AVX512F-32-NEXT: kxorq %k7, %k1, %k7
; AVX512F-32-NEXT: kmovd %edx, %k1
-; AVX512F-32-NEXT: # kill: def %dl killed %dl killed %edx def %edx
+; AVX512F-32-NEXT: # kill: def $dl killed $dl killed $edx def $edx
; AVX512F-32-NEXT: andb $15, %dl
; AVX512F-32-NEXT: andb $2, %al
; AVX512F-32-NEXT: shrb %al
@@ -2667,7 +2667,7 @@
; AVX512F-32-NEXT: kmovd %edx, %k7
; AVX512F-32-NEXT: movl %ebp, %edx
; AVX512F-32-NEXT: shrl $24, %edx
-; AVX512F-32-NEXT: # kill: def %al killed %al killed %eax def %eax
+; AVX512F-32-NEXT: # kill: def $al killed $al killed $eax def $eax
; AVX512F-32-NEXT: shrb $7, %al
; AVX512F-32-NEXT: kshiftlq $63, %k4, %k4
; AVX512F-32-NEXT: kshiftrq $47, %k4, %k4
@@ -2682,7 +2682,7 @@
; AVX512F-32-NEXT: kshiftrq $18, %k4, %k3
; AVX512F-32-NEXT: kxorq %k6, %k3, %k6
; AVX512F-32-NEXT: kmovd %edx, %k3
-; AVX512F-32-NEXT: # kill: def %dl killed %dl killed %edx def %edx
+; AVX512F-32-NEXT: # kill: def $dl killed $dl killed $edx def $edx
; AVX512F-32-NEXT: andb $15, %dl
; AVX512F-32-NEXT: andb $2, %al
; AVX512F-32-NEXT: shrb %al
@@ -2932,7 +2932,7 @@
; AVX512F-32-NEXT: kmovd %ecx, %k5
; AVX512F-32-NEXT: movl %ebx, %edx
; AVX512F-32-NEXT: shrl $24, %edx
-; AVX512F-32-NEXT: # kill: def %al killed %al killed %eax def %eax
+; AVX512F-32-NEXT: # kill: def $al killed $al killed $eax def $eax
; AVX512F-32-NEXT: shrb $7, %al
; AVX512F-32-NEXT: kshiftlq $63, %k6, %k6
; AVX512F-32-NEXT: kshiftrq $15, %k6, %k6
@@ -2948,7 +2948,7 @@
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k7 # 8-byte Reload
; AVX512F-32-NEXT: kxorq %k7, %k1, %k7
; AVX512F-32-NEXT: kmovd %edx, %k1
-; AVX512F-32-NEXT: # kill: def %dl killed %dl killed %edx def %edx
+; AVX512F-32-NEXT: # kill: def $dl killed $dl killed $edx def $edx
; AVX512F-32-NEXT: andb $15, %dl
; AVX512F-32-NEXT: andb $2, %al
; AVX512F-32-NEXT: shrb %al
diff --git a/llvm/test/CodeGen/X86/avx512bw-mov.ll b/llvm/test/CodeGen/X86/avx512bw-mov.ll
index e968d76..31c94f6 100644
--- a/llvm/test/CodeGen/X86/avx512bw-mov.ll
+++ b/llvm/test/CodeGen/X86/avx512bw-mov.ll
@@ -100,7 +100,7 @@
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %k1
; CHECK-NEXT: vmovdqu8 (%rdi), %zmm0 {%k1} {z}
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %addr, i32 4, <16 x i1>%mask, <16 x i8> undef)
ret <16 x i8> %res
@@ -114,7 +114,7 @@
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kmovd %k0, %k1
; CHECK-NEXT: vmovdqu8 (%rdi), %zmm0 {%k1} {z}
-; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%res = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %addr, i32 4, <32 x i1>%mask, <32 x i8> zeroinitializer)
ret <32 x i8> %res
@@ -129,7 +129,7 @@
; CHECK-NEXT: kshiftld $24, %k0, %k0
; CHECK-NEXT: kshiftrd $24, %k0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %zmm0 {%k1} {z}
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %addr, i32 4, <8 x i1>%mask, <8 x i16> undef)
ret <8 x i16> %res
@@ -143,7 +143,7 @@
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %zmm0 {%k1} {z}
-; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%res = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %addr, i32 4, <16 x i1>%mask, <16 x i16> zeroinitializer)
ret <16 x i16> %res
@@ -153,7 +153,7 @@
define void @test_mask_store_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_store_16xi8:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; CHECK-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %k1
@@ -167,7 +167,7 @@
define void @test_mask_store_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_store_32xi8:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
+; CHECK-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kmovd %k0, %k1
@@ -181,7 +181,7 @@
define void @test_mask_store_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_store_8xi16:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; CHECK-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT: vpmovw2m %zmm0, %k0
; CHECK-NEXT: kshiftld $24, %k0, %k0
@@ -196,7 +196,7 @@
define void @test_mask_store_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_store_16xi16:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
+; CHECK-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %k1
diff --git a/llvm/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
index 685f7f5..646e146 100644
--- a/llvm/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
@@ -503,7 +503,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.w.256(<16 x i16> %a, <16 x i16> %b, i16 -1)
@@ -516,7 +516,7 @@
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.w.256(<16 x i16> %a, <16 x i16> %b, i16 %mask)
@@ -555,7 +555,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.w.256(<16 x i16> %a, <16 x i16> %b, i16 -1)
@@ -568,7 +568,7 @@
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.w.256(<16 x i16> %a, <16 x i16> %b, i16 %mask)
@@ -582,7 +582,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x74,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.b.128(<16 x i8> %a, <16 x i8> %b, i16 -1)
ret i16 %res
@@ -594,7 +594,7 @@
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x74,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.b.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
ret i16 %res
@@ -607,7 +607,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.w.128(<8 x i16> %a, <8 x i16> %b, i8 -1)
ret i8 %res
@@ -619,7 +619,7 @@
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.w.128(<8 x i16> %a, <8 x i16> %b, i8 %mask)
ret i8 %res
@@ -632,7 +632,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x64,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.b.128(<16 x i8> %a, <16 x i8> %b, i16 -1)
ret i16 %res
@@ -644,7 +644,7 @@
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x64,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.b.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
ret i16 %res
@@ -657,7 +657,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.w.128(<8 x i16> %a, <8 x i16> %b, i8 -1)
ret i8 %res
@@ -669,7 +669,7 @@
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.w.128(<8 x i16> %a, <8 x i16> %b, i8 %mask)
ret i8 %res
@@ -3683,7 +3683,7 @@
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.ptestm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2)
%res1 = call i16 @llvm.x86.avx512.ptestm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16-1)
@@ -3721,7 +3721,7 @@
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8-1)
@@ -3740,7 +3740,7 @@
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.ptestm.w.256(<16 x i16> %x0, <16 x i16> %x1, i16 %x2)
@@ -3760,7 +3760,7 @@
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.ptestnm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2)
%res1 = call i16 @llvm.x86.avx512.ptestnm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16-1)
@@ -3798,7 +3798,7 @@
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8-1)
@@ -3817,7 +3817,7 @@
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.ptestnm.w.256(<16 x i16> %x0, <16 x i16> %x1, i16 %x2)
@@ -3833,7 +3833,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovb2m %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x29,0xc0]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.cvtb2mask.128(<16 x i8> %x0)
ret i16 %res
@@ -3859,7 +3859,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovw2m %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x29,0xc0]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16> %x0)
ret i8 %res
@@ -3872,7 +3872,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovw2m %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x29,0xc0]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.cvtw2mask.256(<16 x i16> %x0)
diff --git a/llvm/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll b/llvm/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll
index fba2b5f..8f20607 100644
--- a/llvm/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll
+++ b/llvm/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll
@@ -7,7 +7,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmb %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -24,7 +24,7 @@
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmb %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -42,7 +42,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmw %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -59,7 +59,7 @@
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmw %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -77,7 +77,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmb %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -94,7 +94,7 @@
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmb %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -112,7 +112,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmw %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -129,7 +129,7 @@
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmw %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -182,7 +182,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmw %ymm0, %ymm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -200,7 +200,7 @@
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmw %ymm0, %ymm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -254,7 +254,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmw %ymm0, %ymm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -272,7 +272,7 @@
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmw %ymm0, %ymm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
diff --git a/llvm/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
index de0ee1b..3cfab57 100644
--- a/llvm/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
@@ -151,7 +151,7 @@
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x8_512(<8 x float> %x0, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x8_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
@@ -185,7 +185,7 @@
define <8 x double>@test_int_x86_avx512_mask_broadcastf64x2_512(<2 x double> %x0, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
@@ -220,7 +220,7 @@
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x8_512(<8 x i32> %x0, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x8_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
@@ -254,7 +254,7 @@
define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x2_512(<2 x i64> %x0, <8 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
@@ -289,7 +289,7 @@
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x2_512(<4 x float> %x0, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x2_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
@@ -311,7 +311,7 @@
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x2_512(<4 x i32> %x0, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
@@ -335,7 +335,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovd2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.cvtd2mask.512(<16 x i32> %x0)
ret i16 %res
@@ -348,7 +348,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovq2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.cvtq2mask.512(<8 x i64> %x0)
ret i8 %res
diff --git a/llvm/test/CodeGen/X86/avx512dq-intrinsics.ll b/llvm/test/CodeGen/X86/avx512dq-intrinsics.ll
index 6863fc89..f773eef 100644
--- a/llvm/test/CodeGen/X86/avx512dq-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512dq-intrinsics.ll
@@ -351,7 +351,7 @@
; CHECK-NEXT: vfpclasspd $4, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.fpclass.pd.512(<8 x double> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.512(<8 x double> %x0, i32 4, i8 -1)
@@ -369,7 +369,7 @@
; CHECK-NEXT: vfpclassps $4, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.fpclass.ps.512(<16 x float> %x0, i32 4, i16 %x1)
%res1 = call i16 @llvm.x86.avx512.mask.fpclass.ps.512(<16 x float> %x0, i32 4, i16 -1)
@@ -388,7 +388,7 @@
; CHECK-NEXT: vfpclasssd $4, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double> %x0, i32 4, i8 -1)
@@ -401,7 +401,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vfpclasssd $4, (%rdi), %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%x0 = load <2 x double>, <2 x double>* %x0ptr
%res = call i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double> %x0, i32 4, i8 -1)
@@ -419,7 +419,7 @@
; CHECK-NEXT: vfpclassss $4, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float> %x0, i32 4, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float> %x0, i32 4, i8 -1)
@@ -432,7 +432,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vfpclassss $4, (%rdi), %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%x0 = load <4 x float>, <4 x float>* %x0ptr
%res = call i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float> %x0, i32 4, i8 -1)
diff --git a/llvm/test/CodeGen/X86/avx512dq-mask-op.ll b/llvm/test/CodeGen/X86/avx512dq-mask-op.ll
index 8f7938f..d201720 100644
--- a/llvm/test/CodeGen/X86/avx512dq-mask-op.ll
+++ b/llvm/test/CodeGen/X86/avx512dq-mask-op.ll
@@ -7,7 +7,7 @@
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: knotb %k0, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -57,7 +57,7 @@
; CHECK-NEXT: kxorb %k1, %k0, %k0
; CHECK-NEXT: korb %k0, %k2, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%ma = load <8 x i1>, <8 x i1>* %x
%mb = load <8 x i1>, <8 x i1>* %y
diff --git a/llvm/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
index 1f37d79..84c31f2 100644
--- a/llvm/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
@@ -1673,7 +1673,7 @@
define <4 x double>@test_int_x86_avx512_mask_broadcastf64x2_256(<2 x double> %x0, <4 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf64x2 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x18,0xc8,0x01]
@@ -1708,7 +1708,7 @@
define <4 x i64>@test_int_x86_avx512_mask_broadcasti64x2_256(<2 x i64> %x0, <4 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti64x2 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x38,0xc8,0x01]
@@ -1743,7 +1743,7 @@
define <8 x float>@test_int_x86_avx512_mask_broadcastf32x2_256(<4 x float> %x0, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x2_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x18,0xc8,0x01]
@@ -1764,7 +1764,7 @@
define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x2_256(<4 x i32> %x0, <8 x i32> %x2, i8 %x3, i64 * %y_ptr) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vmovq (%rsi), %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0x16]
; CHECK-NEXT: ## xmm2 = mem[0],zero
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1811,7 +1811,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovd2m %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32> %x0)
ret i8 %res
@@ -1824,7 +1824,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovd2m %ymm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x28,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtd2mask.256(<8 x i32> %x0)
ret i8 %res
@@ -1837,7 +1837,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovq2m %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtq2mask.128(<2 x i64> %x0)
ret i8 %res
@@ -1850,7 +1850,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovq2m %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtq2mask.256(<4 x i64> %x0)
ret i8 %res
diff --git a/llvm/test/CodeGen/X86/avx512dqvl-intrinsics.ll b/llvm/test/CodeGen/X86/avx512dqvl-intrinsics.ll
index 5692efd..580fb60 100644
--- a/llvm/test/CodeGen/X86/avx512dqvl-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512dqvl-intrinsics.ll
@@ -560,7 +560,7 @@
; CHECK-NEXT: vfpclassps $4, %xmm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x08,0x66,0xc0,0x04]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.ps.128(<4 x float> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.ps.128(<4 x float> %x0, i32 4, i8 -1)
@@ -579,7 +579,7 @@
; CHECK-NEXT: vfpclassps $4, %ymm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x28,0x66,0xc0,0x04]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.ps.256(<8 x float> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.ps.256(<8 x float> %x0, i32 4, i8 -1)
@@ -598,7 +598,7 @@
; CHECK-NEXT: vfpclasspd $2, %xmm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x08,0x66,0xc0,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.pd.128(<2 x double> %x0, i32 4, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.128(<2 x double> %x0, i32 2, i8 -1)
@@ -617,7 +617,7 @@
; CHECK-NEXT: vfpclasspd $4, %ymm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x28,0x66,0xc0,0x04]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.pd.256(<4 x double> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.256(<4 x double> %x0, i32 4, i8 -1)
diff --git a/llvm/test/CodeGen/X86/avx512f-vec-test-testn.ll b/llvm/test/CodeGen/X86/avx512f-vec-test-testn.ll
index 731f5ff..862116e 100644
--- a/llvm/test/CodeGen/X86/avx512f-vec-test-testn.ll
+++ b/llvm/test/CodeGen/X86/avx512f-vec-test-testn.ll
@@ -7,7 +7,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmq %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -23,7 +23,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmd %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -41,7 +41,7 @@
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestmq %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -60,7 +60,7 @@
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestmd %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -79,7 +79,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmq %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -95,7 +95,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmd %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -113,7 +113,7 @@
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestnmq %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -132,7 +132,7 @@
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestnmd %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
index d152a5a..16f6308 100644
--- a/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
@@ -1064,7 +1064,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.256(<8 x i32> %a, <8 x i32> %b, i8 -1)
ret i8 %res
@@ -1076,7 +1076,7 @@
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.256(<8 x i32> %a, <8 x i32> %b, i8 %mask)
ret i8 %res
@@ -1089,7 +1089,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.256(<4 x i64> %a, <4 x i64> %b, i8 -1)
ret i8 %res
@@ -1101,7 +1101,7 @@
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.256(<4 x i64> %a, <4 x i64> %b, i8 %mask)
ret i8 %res
@@ -1114,7 +1114,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.256(<8 x i32> %a, <8 x i32> %b, i8 -1)
ret i8 %res
@@ -1126,7 +1126,7 @@
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.256(<8 x i32> %a, <8 x i32> %b, i8 %mask)
ret i8 %res
@@ -1139,7 +1139,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.256(<4 x i64> %a, <4 x i64> %b, i8 -1)
ret i8 %res
@@ -1151,7 +1151,7 @@
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.256(<4 x i64> %a, <4 x i64> %b, i8 %mask)
ret i8 %res
@@ -1164,7 +1164,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.128(<4 x i32> %a, <4 x i32> %b, i8 -1)
ret i8 %res
@@ -1176,7 +1176,7 @@
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.128(<4 x i32> %a, <4 x i32> %b, i8 %mask)
ret i8 %res
@@ -1189,7 +1189,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.128(<2 x i64> %a, <2 x i64> %b, i8 -1)
ret i8 %res
@@ -1201,7 +1201,7 @@
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.128(<2 x i64> %a, <2 x i64> %b, i8 %mask)
ret i8 %res
@@ -1214,7 +1214,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.128(<4 x i32> %a, <4 x i32> %b, i8 -1)
ret i8 %res
@@ -1226,7 +1226,7 @@
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.128(<4 x i32> %a, <4 x i32> %b, i8 %mask)
ret i8 %res
@@ -1239,7 +1239,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.128(<2 x i64> %a, <2 x i64> %b, i8 -1)
ret i8 %res
@@ -1251,7 +1251,7 @@
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.128(<2 x i64> %a, <2 x i64> %b, i8 %mask)
ret i8 %res
@@ -5863,7 +5863,7 @@
define <8 x float>@test_int_x86_avx512_mask_broadcastf32x4_256(<4 x float> %x0, <8 x float> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x18,0xc8,0x01]
@@ -5896,7 +5896,7 @@
define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x4_256(<4 x i32> %x0, <8 x i32> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x38,0xc8,0x01]
@@ -5999,7 +5999,7 @@
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8-1)
@@ -6018,7 +6018,7 @@
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8-1)
@@ -6037,7 +6037,7 @@
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8-1)
@@ -6056,7 +6056,7 @@
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8-1)
@@ -6075,7 +6075,7 @@
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8-1)
@@ -6094,7 +6094,7 @@
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8-1)
@@ -6113,7 +6113,7 @@
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8-1)
@@ -6132,7 +6132,7 @@
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8-1)
diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll
index f635342..a625467 100644
--- a/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -718,7 +718,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpleps %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7c,0x28,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float> %a, <8 x float> %b, i32 2, i8 -1)
ret i8 %res
@@ -730,7 +730,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpleps %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7c,0x08,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float> %a, <4 x float> %b, i32 2, i8 -1)
ret i8 %res
@@ -742,7 +742,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vcmplepd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0xfd,0x28,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double> %a, <4 x double> %b, i32 2, i8 -1)
ret i8 %res
@@ -754,7 +754,7 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: vcmplepd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> %a, <2 x double> %b, i32 2, i8 -1)
ret i8 %res
diff --git a/llvm/test/CodeGen/X86/avx512vl-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512vl-vec-cmp.ll
index dd9ed2c..23fbbf3 100644
--- a/llvm/test/CodeGen/X86/avx512vl-vec-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-vec-cmp.ll
@@ -11,11 +11,11 @@
;
; NoVLX-LABEL: test256_1:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; NoVLX-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%mask = icmp eq <4 x i64> %x, %y
%max = select <4 x i1> %mask, <4 x i64> %x, <4 x i64> %y
@@ -31,12 +31,12 @@
;
; NoVLX-LABEL: test256_2:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; NoVLX-NEXT: vpblendmq %zmm2, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%mask = icmp sgt <4 x i64> %x, %y
%max = select <4 x i1> %mask, <4 x i64> %x1, <4 x i64> %y
@@ -52,12 +52,12 @@
;
; NoVLX-LABEL: test256_3:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k1
; NoVLX-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%mask = icmp sge <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x1, <8 x i32> %y
@@ -73,12 +73,12 @@
;
; NoVLX-LABEL: test256_4:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
; NoVLX-NEXT: vpblendmq %zmm2, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%mask = icmp ugt <4 x i64> %x, %y
%max = select <4 x i1> %mask, <4 x i64> %x1, <4 x i64> %y
@@ -94,12 +94,12 @@
;
; NoVLX-LABEL: test256_5:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp eq <8 x i32> %x, %y
@@ -116,12 +116,12 @@
;
; NoVLX-LABEL: test256_5b:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpeqd %zmm0, %zmm2, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp eq <8 x i32> %y, %x
@@ -138,12 +138,12 @@
;
; NoVLX-LABEL: test256_6:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sgt <8 x i32> %x, %y
@@ -160,12 +160,12 @@
;
; NoVLX-LABEL: test256_6b:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp slt <8 x i32> %y, %x
@@ -182,12 +182,12 @@
;
; NoVLX-LABEL: test256_7:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sle <8 x i32> %x, %y
@@ -204,12 +204,12 @@
;
; NoVLX-LABEL: test256_7b:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sge <8 x i32> %y, %x
@@ -226,12 +226,12 @@
;
; NoVLX-LABEL: test256_8:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpleud %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp ule <8 x i32> %x, %y
@@ -248,12 +248,12 @@
;
; NoVLX-LABEL: test256_8b:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpnltud %zmm0, %zmm2, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp uge <8 x i32> %y, %x
@@ -271,14 +271,14 @@
;
; NoVLX-LABEL: test256_9:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
-; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3
+; NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; NoVLX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 {%k1}
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%mask1 = icmp eq <8 x i32> %x1, %y1
%mask0 = icmp eq <8 x i32> %x, %y
@@ -297,14 +297,14 @@
;
; NoVLX-LABEL: test256_10:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
-; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3
+; NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpleq %zmm1, %zmm0, %k1
; NoVLX-NEXT: vpcmpleq %zmm2, %zmm3, %k1 {%k1}
; NoVLX-NEXT: vpblendmq %zmm0, %zmm2, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%mask1 = icmp sge <4 x i64> %x1, %y1
%mask0 = icmp sle <4 x i64> %x, %y
@@ -323,14 +323,14 @@
;
; NoVLX-LABEL: test256_11:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm3
; NoVLX-NEXT: vpcmpgtq %zmm3, %zmm0, %k1
; NoVLX-NEXT: vpcmpgtq %zmm2, %zmm1, %k1 {%k1}
; NoVLX-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%mask1 = icmp sgt <4 x i64> %x1, %y1
%y = load <4 x i64>, <4 x i64>* %y.ptr, align 4
@@ -350,14 +350,14 @@
;
; NoVLX-LABEL: test256_12:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm3
; NoVLX-NEXT: vpcmpleud %zmm3, %zmm0, %k1
; NoVLX-NEXT: vpcmpled %zmm1, %zmm2, %k1 {%k1}
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%mask1 = icmp sge <8 x i32> %x1, %y1
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
@@ -376,12 +376,12 @@
;
; NoVLX-LABEL: test256_13:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm2
; NoVLX-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <4 x i64> undef, i64 %yb, i32 0
@@ -400,12 +400,12 @@
;
; NoVLX-LABEL: test256_14:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm2
; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <8 x i32> undef, i32 %yb, i32 0
@@ -425,14 +425,14 @@
;
; NoVLX-LABEL: test256_15:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpled %zmm1, %zmm2, %k1
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm2
; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1 {%k1}
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%mask1 = icmp sge <8 x i32> %x1, %y1
%yb = load i32, i32* %yb.ptr, align 4
@@ -454,14 +454,14 @@
;
; NoVLX-LABEL: test256_16:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpleq %zmm1, %zmm2, %k1
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm2
; NoVLX-NEXT: vpcmpgtq %zmm2, %zmm0, %k1 {%k1}
; NoVLX-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%mask1 = icmp sge <4 x i64> %x1, %y1
%yb = load i64, i64* %yb.ptr, align 4
@@ -482,12 +482,12 @@
;
; NoVLX-LABEL: test256_17:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpneqd %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp ne <8 x i32> %x, %y
@@ -504,12 +504,12 @@
;
; NoVLX-LABEL: test256_18:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpneqd %zmm0, %zmm2, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp ne <8 x i32> %y, %x
@@ -526,12 +526,12 @@
;
; NoVLX-LABEL: test256_19:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpnltud %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp uge <8 x i32> %x, %y
@@ -548,12 +548,12 @@
;
; NoVLX-LABEL: test256_20:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpnltud %zmm0, %zmm2, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp uge <8 x i32> %y, %x
@@ -570,11 +570,11 @@
;
; NoVLX-LABEL: test128_1:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; NoVLX-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%mask = icmp eq <2 x i64> %x, %y
%max = select <2 x i1> %mask, <2 x i64> %x, <2 x i64> %y
@@ -590,12 +590,12 @@
;
; NoVLX-LABEL: test128_2:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; NoVLX-NEXT: vpblendmq %zmm2, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%mask = icmp sgt <2 x i64> %x, %y
%max = select <2 x i1> %mask, <2 x i64> %x1, <2 x i64> %y
@@ -611,12 +611,12 @@
;
; NoVLX-LABEL: test128_3:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k1
; NoVLX-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%mask = icmp sge <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x1, <4 x i32> %y
@@ -632,12 +632,12 @@
;
; NoVLX-LABEL: test128_4:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
; NoVLX-NEXT: vpblendmq %zmm2, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%mask = icmp ugt <2 x i64> %x, %y
%max = select <2 x i1> %mask, <2 x i64> %x1, <2 x i64> %y
@@ -653,12 +653,12 @@
;
; NoVLX-LABEL: test128_5:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %xmm2
; NoVLX-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %yp, align 4
%mask = icmp eq <4 x i32> %x, %y
@@ -675,12 +675,12 @@
;
; NoVLX-LABEL: test128_5b:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %xmm2
; NoVLX-NEXT: vpcmpeqd %zmm0, %zmm2, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %yp, align 4
%mask = icmp eq <4 x i32> %y, %x
@@ -697,12 +697,12 @@
;
; NoVLX-LABEL: test128_6:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %xmm2
; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp sgt <4 x i32> %x, %y
@@ -719,12 +719,12 @@
;
; NoVLX-LABEL: test128_6b:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %xmm2
; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp slt <4 x i32> %y, %x
@@ -741,12 +741,12 @@
;
; NoVLX-LABEL: test128_7:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %xmm2
; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp sle <4 x i32> %x, %y
@@ -763,12 +763,12 @@
;
; NoVLX-LABEL: test128_7b:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %xmm2
; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp sge <4 x i32> %y, %x
@@ -785,12 +785,12 @@
;
; NoVLX-LABEL: test128_8:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %xmm2
; NoVLX-NEXT: vpcmpleud %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp ule <4 x i32> %x, %y
@@ -807,12 +807,12 @@
;
; NoVLX-LABEL: test128_8b:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %xmm2
; NoVLX-NEXT: vpcmpnltud %zmm0, %zmm2, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp uge <4 x i32> %y, %x
@@ -830,14 +830,14 @@
;
; NoVLX-LABEL: test128_9:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm3 killed %xmm3 def %zmm3
-; NoVLX-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm3 killed $xmm3 def $zmm3
+; NoVLX-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; NoVLX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 {%k1}
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%mask1 = icmp eq <4 x i32> %x1, %y1
%mask0 = icmp eq <4 x i32> %x, %y
@@ -856,14 +856,14 @@
;
; NoVLX-LABEL: test128_10:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm3 killed %xmm3 def %zmm3
-; NoVLX-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm3 killed $xmm3 def $zmm3
+; NoVLX-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpleq %zmm1, %zmm0, %k1
; NoVLX-NEXT: vpcmpleq %zmm2, %zmm3, %k1 {%k1}
; NoVLX-NEXT: vpblendmq %zmm0, %zmm2, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%mask1 = icmp sge <2 x i64> %x1, %y1
%mask0 = icmp sle <2 x i64> %x, %y
@@ -882,14 +882,14 @@
;
; NoVLX-LABEL: test128_11:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %xmm3
; NoVLX-NEXT: vpcmpgtq %zmm3, %zmm0, %k1
; NoVLX-NEXT: vpcmpgtq %zmm2, %zmm1, %k1 {%k1}
; NoVLX-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%mask1 = icmp sgt <2 x i64> %x1, %y1
%y = load <2 x i64>, <2 x i64>* %y.ptr, align 4
@@ -909,14 +909,14 @@
;
; NoVLX-LABEL: test128_12:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %xmm3
; NoVLX-NEXT: vpcmpleud %zmm3, %zmm0, %k1
; NoVLX-NEXT: vpcmpled %zmm1, %zmm2, %k1 {%k1}
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%mask1 = icmp sge <4 x i32> %x1, %y1
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
@@ -935,12 +935,12 @@
;
; NoVLX-LABEL: test128_13:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm2
; NoVLX-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <2 x i64> undef, i64 %yb, i32 0
@@ -959,12 +959,12 @@
;
; NoVLX-LABEL: test128_14:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm2
; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <4 x i32> undef, i32 %yb, i32 0
@@ -984,14 +984,14 @@
;
; NoVLX-LABEL: test128_15:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpled %zmm1, %zmm2, %k1
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm2
; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1 {%k1}
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%mask1 = icmp sge <4 x i32> %x1, %y1
%yb = load i32, i32* %yb.ptr, align 4
@@ -1013,14 +1013,14 @@
;
; NoVLX-LABEL: test128_16:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpleq %zmm1, %zmm2, %k1
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm2
; NoVLX-NEXT: vpcmpgtq %zmm2, %zmm0, %k1 {%k1}
; NoVLX-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%mask1 = icmp sge <2 x i64> %x1, %y1
%yb = load i64, i64* %yb.ptr, align 4
@@ -1041,12 +1041,12 @@
;
; NoVLX-LABEL: test128_17:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %xmm2
; NoVLX-NEXT: vpcmpneqd %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp ne <4 x i32> %x, %y
@@ -1063,12 +1063,12 @@
;
; NoVLX-LABEL: test128_18:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %xmm2
; NoVLX-NEXT: vpcmpneqd %zmm0, %zmm2, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp ne <4 x i32> %y, %x
@@ -1085,12 +1085,12 @@
;
; NoVLX-LABEL: test128_19:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %xmm2
; NoVLX-NEXT: vpcmpnltud %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp uge <4 x i32> %x, %y
@@ -1107,12 +1107,12 @@
;
; NoVLX-LABEL: test128_20:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %xmm2
; NoVLX-NEXT: vpcmpnltud %zmm0, %zmm2, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NoVLX-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp uge <4 x i32> %y, %x
@@ -1130,12 +1130,12 @@
;
; NoVLX-LABEL: testnm_and:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
-; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
+; NoVLX-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3
+; NoVLX-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
; NoVLX-NEXT: vpor %ymm1, %ymm0, %ymm0
; NoVLX-NEXT: vptestnmd %zmm0, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm2, %zmm3, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NoVLX-NEXT: retq
%c = icmp eq <8 x i32> %a, zeroinitializer
%d = icmp eq <8 x i32> %b, zeroinitializer
diff --git a/llvm/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll b/llvm/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
index d5a2f38..f06454c 100644
--- a/llvm/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
@@ -364,7 +364,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v8i1_v16i1_mask:
@@ -373,7 +373,7 @@
; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -390,7 +390,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v8i1_v16i1_mask_mem:
@@ -399,7 +399,7 @@
; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -418,7 +418,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask:
@@ -428,7 +428,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -448,7 +448,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask_mem:
@@ -458,7 +458,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -1572,18 +1572,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -1600,18 +1600,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -1630,19 +1630,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -1663,19 +1663,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -1697,18 +1697,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -1728,19 +1728,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -1763,18 +1763,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -1791,18 +1791,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -1821,19 +1821,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -1854,19 +1854,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -1888,18 +1888,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -1919,19 +1919,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -1958,8 +1958,8 @@
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -1984,7 +1984,7 @@
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -2012,8 +2012,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -2043,7 +2043,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
@@ -2075,7 +2075,7 @@
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -2104,7 +2104,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
@@ -2137,8 +2137,8 @@
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -2164,7 +2164,7 @@
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -2193,8 +2193,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -2225,7 +2225,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
@@ -2258,7 +2258,7 @@
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -2288,7 +2288,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
@@ -2318,19 +2318,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -2347,19 +2347,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -2378,20 +2378,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -2411,20 +2411,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -2445,19 +2445,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -2477,20 +2477,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -2517,8 +2517,8 @@
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
@@ -2544,7 +2544,7 @@
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -2573,8 +2573,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -2604,7 +2604,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
@@ -2636,7 +2636,7 @@
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -2666,7 +2666,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
@@ -2699,8 +2699,8 @@
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
@@ -2727,7 +2727,7 @@
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -2757,8 +2757,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -2789,7 +2789,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
@@ -2822,7 +2822,7 @@
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -2853,7 +2853,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
@@ -3208,8 +3208,8 @@
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -3236,7 +3236,7 @@
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -3266,8 +3266,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -3299,7 +3299,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
@@ -3333,7 +3333,7 @@
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -3364,7 +3364,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
@@ -3395,18 +3395,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -3423,18 +3423,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -3453,19 +3453,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -3486,19 +3486,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -3520,18 +3520,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -3551,19 +3551,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -3586,18 +3586,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -3614,18 +3614,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -3644,19 +3644,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -3677,19 +3677,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -3711,18 +3711,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -3742,19 +3742,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -3781,8 +3781,8 @@
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -3807,7 +3807,7 @@
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -3835,8 +3835,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -3866,7 +3866,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
@@ -3898,7 +3898,7 @@
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -3927,7 +3927,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
@@ -3960,8 +3960,8 @@
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -3987,7 +3987,7 @@
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -4016,8 +4016,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -4048,7 +4048,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
@@ -4081,7 +4081,7 @@
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -4111,7 +4111,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
@@ -4141,19 +4141,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4170,19 +4170,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4201,20 +4201,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4235,20 +4235,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4270,19 +4270,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4302,20 +4302,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4338,19 +4338,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4367,19 +4367,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4398,20 +4398,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4432,20 +4432,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4467,19 +4467,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4499,20 +4499,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4540,8 +4540,8 @@
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -4567,7 +4567,7 @@
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -4596,8 +4596,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -4628,7 +4628,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
@@ -4661,7 +4661,7 @@
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -4691,7 +4691,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
@@ -4725,8 +4725,8 @@
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -4753,7 +4753,7 @@
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -4783,8 +4783,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -4816,7 +4816,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
@@ -4850,7 +4850,7 @@
;
; NoVLX-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -4881,7 +4881,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
@@ -4911,7 +4911,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -4919,7 +4919,7 @@
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4936,7 +4936,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -4944,7 +4944,7 @@
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4963,7 +4963,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -4972,7 +4972,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4992,7 +4992,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -5001,7 +5001,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -5022,7 +5022,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -5030,7 +5030,7 @@
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -5050,7 +5050,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -5059,7 +5059,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -5760,7 +5760,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask:
@@ -5769,7 +5769,7 @@
; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -5786,7 +5786,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask_mem:
@@ -5795,7 +5795,7 @@
; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -5814,7 +5814,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask:
@@ -5824,7 +5824,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -5844,7 +5844,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem:
@@ -5854,7 +5854,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -6968,18 +6968,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -6996,18 +6996,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7026,19 +7026,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7059,19 +7059,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7093,18 +7093,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7124,19 +7124,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7159,18 +7159,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7187,18 +7187,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7217,19 +7217,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7250,19 +7250,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7284,18 +7284,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7315,19 +7315,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7354,8 +7354,8 @@
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -7380,7 +7380,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -7408,8 +7408,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -7439,7 +7439,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -7471,7 +7471,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -7500,7 +7500,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -7533,8 +7533,8 @@
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -7560,7 +7560,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -7589,8 +7589,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -7621,7 +7621,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -7654,7 +7654,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -7684,7 +7684,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -7714,19 +7714,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7743,19 +7743,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7774,20 +7774,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7807,20 +7807,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7841,19 +7841,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7873,20 +7873,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -7913,8 +7913,8 @@
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
@@ -7940,7 +7940,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -7969,8 +7969,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -8000,7 +8000,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -8032,7 +8032,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -8062,7 +8062,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -8095,8 +8095,8 @@
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
@@ -8123,7 +8123,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -8153,8 +8153,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -8185,7 +8185,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -8218,7 +8218,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -8249,7 +8249,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -8604,8 +8604,8 @@
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -8632,7 +8632,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -8662,8 +8662,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -8695,7 +8695,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -8729,7 +8729,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -8760,7 +8760,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -8791,18 +8791,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -8819,18 +8819,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -8849,19 +8849,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -8882,19 +8882,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -8916,18 +8916,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -8947,19 +8947,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -8982,18 +8982,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9010,18 +9010,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9040,19 +9040,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9073,19 +9073,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9107,18 +9107,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9138,19 +9138,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9177,8 +9177,8 @@
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -9203,7 +9203,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -9231,8 +9231,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -9262,7 +9262,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -9294,7 +9294,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -9323,7 +9323,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -9356,8 +9356,8 @@
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -9383,7 +9383,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -9412,8 +9412,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -9444,7 +9444,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -9477,7 +9477,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -9507,7 +9507,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -9537,19 +9537,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9566,19 +9566,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9597,20 +9597,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9631,20 +9631,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9666,19 +9666,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9698,20 +9698,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9734,19 +9734,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9763,19 +9763,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9794,20 +9794,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9828,20 +9828,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9863,19 +9863,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9895,20 +9895,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9936,8 +9936,8 @@
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -9963,7 +9963,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -9992,8 +9992,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -10024,7 +10024,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -10057,7 +10057,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -10087,7 +10087,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -10121,8 +10121,8 @@
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -10149,7 +10149,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -10179,8 +10179,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -10212,7 +10212,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -10246,7 +10246,7 @@
;
; NoVLX-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -10277,7 +10277,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
@@ -10307,7 +10307,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -10315,7 +10315,7 @@
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -10332,7 +10332,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -10340,7 +10340,7 @@
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -10359,7 +10359,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -10368,7 +10368,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -10388,7 +10388,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -10397,7 +10397,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -10418,7 +10418,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -10426,7 +10426,7 @@
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -10446,7 +10446,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -10455,7 +10455,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -11190,7 +11190,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v8i1_v16i1_mask:
@@ -11201,7 +11201,7 @@
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -11218,7 +11218,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v8i1_v16i1_mask_mem:
@@ -11230,7 +11230,7 @@
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -11249,7 +11249,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask:
@@ -11261,7 +11261,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -11281,7 +11281,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask_mem:
@@ -11294,7 +11294,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -12468,18 +12468,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -12496,18 +12496,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -12526,19 +12526,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -12559,19 +12559,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -12593,18 +12593,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -12624,19 +12624,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -12659,18 +12659,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -12687,18 +12687,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -12717,19 +12717,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -12750,19 +12750,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -12784,18 +12784,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -12815,19 +12815,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -12854,8 +12854,8 @@
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -12880,7 +12880,7 @@
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -12908,8 +12908,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -12939,7 +12939,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -12971,7 +12971,7 @@
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -13000,7 +13000,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -13033,8 +13033,8 @@
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -13060,7 +13060,7 @@
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -13089,8 +13089,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -13121,7 +13121,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -13154,7 +13154,7 @@
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -13184,7 +13184,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -13214,19 +13214,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -13243,19 +13243,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -13274,20 +13274,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -13307,20 +13307,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -13341,19 +13341,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -13373,20 +13373,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -13413,8 +13413,8 @@
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
@@ -13440,7 +13440,7 @@
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -13469,8 +13469,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -13500,7 +13500,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -13532,7 +13532,7 @@
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -13562,7 +13562,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -13595,8 +13595,8 @@
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
@@ -13623,7 +13623,7 @@
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -13653,8 +13653,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -13685,7 +13685,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -13718,7 +13718,7 @@
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -13749,7 +13749,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -14104,8 +14104,8 @@
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -14132,7 +14132,7 @@
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -14162,8 +14162,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -14195,7 +14195,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
@@ -14229,7 +14229,7 @@
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -14260,7 +14260,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
@@ -14291,18 +14291,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -14319,18 +14319,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -14349,19 +14349,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -14382,19 +14382,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -14416,18 +14416,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -14447,19 +14447,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -14482,18 +14482,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -14510,18 +14510,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -14540,19 +14540,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -14573,19 +14573,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -14607,18 +14607,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -14638,19 +14638,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -14677,8 +14677,8 @@
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -14703,7 +14703,7 @@
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -14731,8 +14731,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -14762,7 +14762,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
@@ -14794,7 +14794,7 @@
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -14823,7 +14823,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
@@ -14856,8 +14856,8 @@
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -14883,7 +14883,7 @@
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -14912,8 +14912,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -14944,7 +14944,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
@@ -14977,7 +14977,7 @@
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -15007,7 +15007,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
@@ -15037,19 +15037,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15066,19 +15066,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15097,20 +15097,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15131,20 +15131,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15166,19 +15166,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15198,20 +15198,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15234,19 +15234,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15263,19 +15263,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15294,20 +15294,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15328,20 +15328,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15363,19 +15363,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15395,20 +15395,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15436,8 +15436,8 @@
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -15463,7 +15463,7 @@
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -15492,8 +15492,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -15524,7 +15524,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
@@ -15557,7 +15557,7 @@
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -15587,7 +15587,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
@@ -15621,8 +15621,8 @@
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -15649,7 +15649,7 @@
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -15679,8 +15679,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -15712,7 +15712,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
@@ -15746,7 +15746,7 @@
;
; NoVLX-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -15777,7 +15777,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
@@ -15807,7 +15807,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -15815,7 +15815,7 @@
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15832,7 +15832,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -15840,7 +15840,7 @@
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15859,7 +15859,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -15868,7 +15868,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15888,7 +15888,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -15897,7 +15897,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15918,7 +15918,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -15926,7 +15926,7 @@
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpnltq (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -15946,7 +15946,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -15955,7 +15955,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpnltq (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -16692,7 +16692,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v8i1_v16i1_mask:
@@ -16704,7 +16704,7 @@
; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -16721,7 +16721,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v8i1_v16i1_mask_mem:
@@ -16733,7 +16733,7 @@
; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -16752,7 +16752,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask:
@@ -16765,7 +16765,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -16785,7 +16785,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask_mem:
@@ -16798,7 +16798,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -17980,18 +17980,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18008,18 +18008,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18038,19 +18038,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18071,19 +18071,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18105,18 +18105,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18136,19 +18136,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18171,18 +18171,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18199,18 +18199,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18229,19 +18229,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18262,19 +18262,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18296,18 +18296,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18327,19 +18327,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18366,8 +18366,8 @@
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -18392,7 +18392,7 @@
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -18420,8 +18420,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -18451,7 +18451,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
@@ -18483,7 +18483,7 @@
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -18512,7 +18512,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
@@ -18545,8 +18545,8 @@
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -18572,7 +18572,7 @@
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -18601,8 +18601,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -18633,7 +18633,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
@@ -18666,7 +18666,7 @@
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -18696,7 +18696,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
@@ -18726,19 +18726,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18755,19 +18755,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18786,20 +18786,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18819,20 +18819,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18853,19 +18853,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18885,20 +18885,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18925,8 +18925,8 @@
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
@@ -18952,7 +18952,7 @@
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -18981,8 +18981,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -19012,7 +19012,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
@@ -19044,7 +19044,7 @@
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -19074,7 +19074,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
@@ -19107,8 +19107,8 @@
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
@@ -19135,7 +19135,7 @@
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -19165,8 +19165,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -19197,7 +19197,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
@@ -19230,7 +19230,7 @@
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -19261,7 +19261,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
@@ -19616,8 +19616,8 @@
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -19644,7 +19644,7 @@
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -19674,8 +19674,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -19707,7 +19707,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
@@ -19741,7 +19741,7 @@
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -19772,7 +19772,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
@@ -19803,18 +19803,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -19831,18 +19831,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -19861,19 +19861,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -19894,19 +19894,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -19928,18 +19928,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -19959,19 +19959,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -19994,18 +19994,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20022,18 +20022,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20052,19 +20052,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20085,19 +20085,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20119,18 +20119,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20150,19 +20150,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20189,8 +20189,8 @@
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -20215,7 +20215,7 @@
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -20243,8 +20243,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -20274,7 +20274,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
@@ -20306,7 +20306,7 @@
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -20335,7 +20335,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
@@ -20368,8 +20368,8 @@
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -20395,7 +20395,7 @@
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -20424,8 +20424,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -20456,7 +20456,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
@@ -20489,7 +20489,7 @@
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %xmm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -20519,7 +20519,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %xmm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
@@ -20549,19 +20549,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20578,19 +20578,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20609,20 +20609,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20643,20 +20643,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20678,19 +20678,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20710,20 +20710,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20746,19 +20746,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20775,19 +20775,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20806,20 +20806,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20840,20 +20840,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20875,19 +20875,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20907,20 +20907,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20948,8 +20948,8 @@
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -20975,7 +20975,7 @@
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -21004,8 +21004,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -21036,7 +21036,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
@@ -21069,7 +21069,7 @@
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -21099,7 +21099,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
@@ -21133,8 +21133,8 @@
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -21161,7 +21161,7 @@
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -21191,8 +21191,8 @@
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -21224,7 +21224,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
@@ -21258,7 +21258,7 @@
;
; NoVLX-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -21289,7 +21289,7 @@
;
; NoVLX-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vpbroadcastq (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
@@ -21319,7 +21319,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -21327,7 +21327,7 @@
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21344,7 +21344,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -21352,7 +21352,7 @@
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21371,7 +21371,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -21380,7 +21380,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21400,7 +21400,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -21409,7 +21409,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21430,7 +21430,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -21438,7 +21438,7 @@
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21458,7 +21458,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -21467,7 +21467,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21812,18 +21812,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21840,18 +21840,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovaps (%rdi), %xmm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21869,18 +21869,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vbroadcastss (%rdi), %xmm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21900,19 +21900,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21932,19 +21932,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovaps (%rsi), %xmm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21965,19 +21965,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vbroadcastss (%rsi), %xmm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22000,18 +22000,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22028,18 +22028,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovaps (%rdi), %xmm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22057,18 +22057,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vbroadcastss (%rdi), %xmm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22088,19 +22088,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22120,19 +22120,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovaps (%rsi), %xmm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22153,19 +22153,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vbroadcastss (%rsi), %xmm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22192,8 +22192,8 @@
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -22218,7 +22218,7 @@
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovaps (%rdi), %xmm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -22245,7 +22245,7 @@
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vbroadcastss (%rdi), %xmm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -22274,8 +22274,8 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -22304,7 +22304,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovaps (%rsi), %xmm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
@@ -22335,7 +22335,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vbroadcastss (%rsi), %xmm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
@@ -22368,8 +22368,8 @@
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -22395,7 +22395,7 @@
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovaps (%rdi), %xmm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -22423,7 +22423,7 @@
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vbroadcastss (%rdi), %xmm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -22453,8 +22453,8 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -22484,7 +22484,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovaps (%rsi), %xmm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
@@ -22516,7 +22516,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vbroadcastss (%rsi), %xmm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
@@ -22546,19 +22546,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22575,19 +22575,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovaps (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22605,19 +22605,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22637,20 +22637,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22670,20 +22670,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovaps (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22704,20 +22704,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vbroadcastss (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22745,8 +22745,8 @@
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
@@ -22772,7 +22772,7 @@
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovaps (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -22800,7 +22800,7 @@
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -22830,8 +22830,8 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -22861,7 +22861,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovaps (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
@@ -22893,7 +22893,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vbroadcastss (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
@@ -22927,8 +22927,8 @@
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
@@ -22955,7 +22955,7 @@
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovaps (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -22984,7 +22984,7 @@
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -23015,8 +23015,8 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
@@ -23047,7 +23047,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovaps (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
@@ -23080,7 +23080,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vbroadcastss (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
@@ -23530,8 +23530,8 @@
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -23558,7 +23558,7 @@
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovapd (%rdi), %xmm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -23587,7 +23587,7 @@
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -23618,8 +23618,8 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v4i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -23650,7 +23650,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v4i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovapd (%rsi), %xmm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
@@ -23683,7 +23683,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v4i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
@@ -23714,18 +23714,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -23742,18 +23742,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovapd (%rdi), %xmm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -23771,18 +23771,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -23802,19 +23802,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -23834,19 +23834,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovapd (%rsi), %xmm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -23867,19 +23867,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -23902,18 +23902,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -23930,18 +23930,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovapd (%rdi), %xmm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -23959,18 +23959,18 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -23990,19 +23990,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -24022,19 +24022,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovapd (%rsi), %xmm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -24055,19 +24055,19 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -24094,8 +24094,8 @@
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -24120,7 +24120,7 @@
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovapd (%rdi), %xmm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -24147,7 +24147,7 @@
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -24176,8 +24176,8 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -24206,7 +24206,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovapd (%rsi), %xmm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
@@ -24237,7 +24237,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
@@ -24270,8 +24270,8 @@
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
; NoVLX-NEXT: kshiftrw $14, %k0, %k0
@@ -24297,7 +24297,7 @@
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovapd (%rdi), %xmm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -24325,7 +24325,7 @@
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -24355,8 +24355,8 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $14, %k0, %k0
@@ -24386,7 +24386,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovapd (%rsi), %xmm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
@@ -24418,7 +24418,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
@@ -24448,19 +24448,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -24477,19 +24477,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovapd (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -24507,19 +24507,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vbroadcastsd (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -24539,20 +24539,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v8i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -24572,20 +24572,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v8i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovapd (%rsi), %ymm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -24606,20 +24606,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v8i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vbroadcastsd (%rsi), %ymm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -24642,19 +24642,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -24671,19 +24671,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovapd (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -24701,19 +24701,19 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vbroadcastsd (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -24733,20 +24733,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -24766,20 +24766,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovapd (%rsi), %ymm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -24800,20 +24800,20 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vbroadcastsd (%rsi), %ymm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -24841,8 +24841,8 @@
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -24868,7 +24868,7 @@
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovapd (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -24896,7 +24896,7 @@
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vbroadcastsd (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -24926,8 +24926,8 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v32i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -24957,7 +24957,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v32i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovapd (%rsi), %ymm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
@@ -24989,7 +24989,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v32i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vbroadcastsd (%rsi), %ymm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
@@ -25023,8 +25023,8 @@
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $12, %k0, %k0
@@ -25051,7 +25051,7 @@
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vmovapd (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -25080,7 +25080,7 @@
;
; NoVLX-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: vbroadcastsd (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -25111,8 +25111,8 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v64i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
@@ -25143,7 +25143,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v64i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vmovapd (%rsi), %ymm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
@@ -25176,7 +25176,7 @@
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v4i1_v64i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NoVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vbroadcastsd (%rsi), %ymm1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
@@ -25206,7 +25206,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -25214,7 +25214,7 @@
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -25231,7 +25231,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -25239,7 +25239,7 @@
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -25257,7 +25257,7 @@
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -25265,7 +25265,7 @@
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -25285,7 +25285,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -25294,7 +25294,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -25314,7 +25314,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -25323,7 +25323,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -25344,7 +25344,7 @@
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -25353,7 +25353,7 @@
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -25377,7 +25377,7 @@
; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: movzbl %al, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -25386,7 +25386,7 @@
; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: movzbl %al, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -25404,7 +25404,7 @@
; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: movzbl %al, %eax
-; VLX-NEXT: # kill: def %ax killed %ax killed %eax
+; VLX-NEXT: # kill: def $ax killed $ax killed $eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -25414,7 +25414,7 @@
; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: movzbl %al, %eax
-; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
+; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -25878,18 +25878,18 @@
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
; VLX-NEXT: kshiftlb $4, %k0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: def %al killed %al killed %eax
+; VLX-NEXT: # kill: def $al killed $al killed $eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: mask_zero_lower:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NoVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: def %al killed %al killed %eax
+; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
%cmp = icmp ult <4 x i32> %a, zeroinitializer
diff --git a/llvm/test/CodeGen/X86/avx512vl-vec-test-testn.ll b/llvm/test/CodeGen/X86/avx512vl-vec-test-testn.ll
index 89791ab..494ca9a 100644
--- a/llvm/test/CodeGen/X86/avx512vl-vec-test-testn.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-vec-test-testn.ll
@@ -8,14 +8,14 @@
; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestmq %xmm0, %xmm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: def %al killed %al killed %eax
+; X86_64-NEXT: # kill: def $al killed $al killed $eax
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_test_epi64_mask:
; I386: # %bb.0: # %entry
; I386-NEXT: vptestmq %xmm0, %xmm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: def %al killed %al killed %eax
+; I386-NEXT: # kill: def $al killed $al killed $eax
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -31,14 +31,14 @@
; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestmd %xmm0, %xmm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: def %al killed %al killed %eax
+; X86_64-NEXT: # kill: def $al killed $al killed $eax
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_test_epi32_mask:
; I386: # %bb.0: # %entry
; I386-NEXT: vptestmd %xmm0, %xmm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: def %al killed %al killed %eax
+; I386-NEXT: # kill: def $al killed $al killed $eax
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -55,7 +55,7 @@
; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestmq %ymm0, %ymm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: def %al killed %al killed %eax
+; X86_64-NEXT: # kill: def $al killed $al killed $eax
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
@@ -63,7 +63,7 @@
; I386: # %bb.0: # %entry
; I386-NEXT: vptestmq %ymm0, %ymm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: def %al killed %al killed %eax
+; I386-NEXT: # kill: def $al killed $al killed $eax
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
@@ -80,7 +80,7 @@
; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestmd %ymm0, %ymm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: def %al killed %al killed %eax
+; X86_64-NEXT: # kill: def $al killed $al killed $eax
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
@@ -88,7 +88,7 @@
; I386: # %bb.0: # %entry
; I386-NEXT: vptestmd %ymm0, %ymm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: def %al killed %al killed %eax
+; I386-NEXT: # kill: def $al killed $al killed $eax
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
@@ -106,7 +106,7 @@
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestmq %xmm0, %xmm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: def %al killed %al killed %eax
+; X86_64-NEXT: # kill: def $al killed $al killed $eax
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_mask_test_epi64_mask:
@@ -115,7 +115,7 @@
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestmq %xmm0, %xmm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: def %al killed %al killed %eax
+; I386-NEXT: # kill: def $al killed $al killed $eax
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -135,7 +135,7 @@
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestmd %xmm0, %xmm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: def %al killed %al killed %eax
+; X86_64-NEXT: # kill: def $al killed $al killed $eax
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_mask_test_epi32_mask:
@@ -144,7 +144,7 @@
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestmd %xmm0, %xmm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: def %al killed %al killed %eax
+; I386-NEXT: # kill: def $al killed $al killed $eax
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -166,7 +166,7 @@
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestmq %ymm0, %ymm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: def %al killed %al killed %eax
+; X86_64-NEXT: # kill: def $al killed $al killed $eax
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
@@ -176,7 +176,7 @@
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestmq %ymm0, %ymm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: def %al killed %al killed %eax
+; I386-NEXT: # kill: def $al killed $al killed $eax
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
@@ -197,7 +197,7 @@
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestmd %ymm0, %ymm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: def %al killed %al killed %eax
+; X86_64-NEXT: # kill: def $al killed $al killed $eax
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
@@ -207,7 +207,7 @@
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestmd %ymm0, %ymm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: def %al killed %al killed %eax
+; I386-NEXT: # kill: def $al killed $al killed $eax
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
@@ -226,14 +226,14 @@
; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestnmq %xmm0, %xmm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: def %al killed %al killed %eax
+; X86_64-NEXT: # kill: def $al killed $al killed $eax
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_testn_epi64_mask:
; I386: # %bb.0: # %entry
; I386-NEXT: vptestnmq %xmm0, %xmm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: def %al killed %al killed %eax
+; I386-NEXT: # kill: def $al killed $al killed $eax
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -249,14 +249,14 @@
; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestnmd %xmm0, %xmm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: def %al killed %al killed %eax
+; X86_64-NEXT: # kill: def $al killed $al killed $eax
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_testn_epi32_mask:
; I386: # %bb.0: # %entry
; I386-NEXT: vptestnmd %xmm0, %xmm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: def %al killed %al killed %eax
+; I386-NEXT: # kill: def $al killed $al killed $eax
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -273,7 +273,7 @@
; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestnmq %ymm0, %ymm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: def %al killed %al killed %eax
+; X86_64-NEXT: # kill: def $al killed $al killed $eax
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
@@ -281,7 +281,7 @@
; I386: # %bb.0: # %entry
; I386-NEXT: vptestnmq %ymm0, %ymm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: def %al killed %al killed %eax
+; I386-NEXT: # kill: def $al killed $al killed $eax
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
@@ -298,7 +298,7 @@
; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestnmd %ymm0, %ymm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: def %al killed %al killed %eax
+; X86_64-NEXT: # kill: def $al killed $al killed $eax
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
@@ -306,7 +306,7 @@
; I386: # %bb.0: # %entry
; I386-NEXT: vptestnmd %ymm0, %ymm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: def %al killed %al killed %eax
+; I386-NEXT: # kill: def $al killed $al killed $eax
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
@@ -324,7 +324,7 @@
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestnmq %xmm0, %xmm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: def %al killed %al killed %eax
+; X86_64-NEXT: # kill: def $al killed $al killed $eax
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_mask_testn_epi64_mask:
@@ -333,7 +333,7 @@
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestnmq %xmm0, %xmm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: def %al killed %al killed %eax
+; I386-NEXT: # kill: def $al killed $al killed $eax
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -353,7 +353,7 @@
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestnmd %xmm0, %xmm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: def %al killed %al killed %eax
+; X86_64-NEXT: # kill: def $al killed $al killed $eax
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_mask_testn_epi32_mask:
@@ -362,7 +362,7 @@
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestnmd %xmm0, %xmm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: def %al killed %al killed %eax
+; I386-NEXT: # kill: def $al killed $al killed $eax
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -384,7 +384,7 @@
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestnmq %ymm0, %ymm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: def %al killed %al killed %eax
+; X86_64-NEXT: # kill: def $al killed $al killed $eax
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
@@ -394,7 +394,7 @@
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestnmq %ymm0, %ymm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: def %al killed %al killed %eax
+; I386-NEXT: # kill: def $al killed $al killed $eax
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
@@ -415,7 +415,7 @@
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestnmd %ymm0, %ymm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: def %al killed %al killed %eax
+; X86_64-NEXT: # kill: def $al killed $al killed $eax
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
@@ -425,7 +425,7 @@
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestnmd %ymm0, %ymm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: def %al killed %al killed %eax
+; I386-NEXT: # kill: def $al killed $al killed $eax
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
diff --git a/llvm/test/CodeGen/X86/bitcast-and-setcc-128.ll b/llvm/test/CodeGen/X86/bitcast-and-setcc-128.ll
index 45af265..3823499 100644
--- a/llvm/test/CodeGen/X86/bitcast-and-setcc-128.ll
+++ b/llvm/test/CodeGen/X86/bitcast-and-setcc-128.ll
@@ -14,7 +14,7 @@
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm2
; SSE2-SSSE3-NEXT: pmovmskb %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i16:
@@ -24,7 +24,7 @@
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8i16:
@@ -36,7 +36,7 @@
; AVX512F-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -45,7 +45,7 @@
; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtw %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x0 = icmp sgt <8 x i16> %a, %b
%x1 = icmp sgt <8 x i16> %c, %d
@@ -61,7 +61,7 @@
; SSE2-SSSE3-NEXT: pcmpgtd %xmm3, %xmm2
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i32:
@@ -70,7 +70,7 @@
; AVX12-NEXT: vpcmpgtd %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i32:
@@ -78,7 +78,7 @@
; AVX512F-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512F-NEXT: vpcmpgtd %xmm3, %xmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4i32:
@@ -86,7 +86,7 @@
; AVX512BW-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtd %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x0 = icmp sgt <4 x i32> %a, %b
%x1 = icmp sgt <4 x i32> %c, %d
@@ -102,7 +102,7 @@
; SSE2-SSSE3-NEXT: cmpltps %xmm2, %xmm3
; SSE2-SSSE3-NEXT: andps %xmm1, %xmm3
; SSE2-SSSE3-NEXT: movmskps %xmm3, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f32:
@@ -111,7 +111,7 @@
; AVX12-NEXT: vcmpltps %xmm2, %xmm3, %xmm1
; AVX12-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4f32:
@@ -119,7 +119,7 @@
; AVX512F-NEXT: vcmpltps %xmm0, %xmm1, %k1
; AVX512F-NEXT: vcmpltps %xmm2, %xmm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4f32:
@@ -127,7 +127,7 @@
; AVX512BW-NEXT: vcmpltps %xmm0, %xmm1, %k1
; AVX512BW-NEXT: vcmpltps %xmm2, %xmm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x0 = fcmp ogt <4 x float> %a, %b
%x1 = fcmp ogt <4 x float> %c, %d
@@ -143,7 +143,7 @@
; SSE2-SSSE3-NEXT: pcmpgtb %xmm3, %xmm2
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
; SSE2-SSSE3-NEXT: pmovmskb %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v16i8:
@@ -152,7 +152,7 @@
; AVX12-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX12-NEXT: # kill: def $ax killed $ax killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v16i8:
@@ -164,7 +164,7 @@
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -173,7 +173,7 @@
; AVX512BW-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtb %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
%x0 = icmp sgt <16 x i8> %a, %b
%x1 = icmp sgt <16 x i8> %c, %d
@@ -236,7 +236,7 @@
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i8:
@@ -265,7 +265,7 @@
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i8:
@@ -294,7 +294,7 @@
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i8:
@@ -310,7 +310,7 @@
; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512F-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2i8:
@@ -326,7 +326,7 @@
; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x0 = icmp sgt <2 x i8> %a, %b
%x1 = icmp sgt <2 x i8> %c, %d
@@ -389,7 +389,7 @@
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i16:
@@ -418,7 +418,7 @@
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i16:
@@ -447,7 +447,7 @@
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i16:
@@ -463,7 +463,7 @@
; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512F-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2i16:
@@ -479,7 +479,7 @@
; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x0 = icmp sgt <2 x i16> %a, %b
%x1 = icmp sgt <2 x i16> %c, %d
@@ -534,7 +534,7 @@
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i32:
@@ -559,7 +559,7 @@
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i32:
@@ -584,7 +584,7 @@
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i32:
@@ -600,7 +600,7 @@
; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512F-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2i32:
@@ -616,7 +616,7 @@
; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x0 = icmp sgt <2 x i32> %a, %b
%x1 = icmp sgt <2 x i32> %c, %d
@@ -651,7 +651,7 @@
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2i64:
@@ -660,7 +660,7 @@
; AVX12-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v2i64:
@@ -668,7 +668,7 @@
; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512F-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2i64:
@@ -676,7 +676,7 @@
; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtq %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x0 = icmp sgt <2 x i64> %a, %b
%x1 = icmp sgt <2 x i64> %c, %d
@@ -692,7 +692,7 @@
; SSE2-SSSE3-NEXT: cmpltpd %xmm2, %xmm3
; SSE2-SSSE3-NEXT: andpd %xmm1, %xmm3
; SSE2-SSSE3-NEXT: movmskpd %xmm3, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2f64:
@@ -701,7 +701,7 @@
; AVX12-NEXT: vcmpltpd %xmm2, %xmm3, %xmm1
; AVX12-NEXT: vandpd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v2f64:
@@ -709,7 +709,7 @@
; AVX512F-NEXT: vcmpltpd %xmm0, %xmm1, %k1
; AVX512F-NEXT: vcmpltpd %xmm2, %xmm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2f64:
@@ -717,7 +717,7 @@
; AVX512BW-NEXT: vcmpltpd %xmm0, %xmm1, %k1
; AVX512BW-NEXT: vcmpltpd %xmm2, %xmm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x0 = fcmp ogt <2 x double> %a, %b
%x1 = fcmp ogt <2 x double> %c, %d
@@ -741,7 +741,7 @@
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i8:
@@ -758,7 +758,7 @@
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i8:
@@ -774,7 +774,7 @@
; AVX512F-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512F-NEXT: vpcmpgtd %xmm3, %xmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4i8:
@@ -790,7 +790,7 @@
; AVX512BW-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtd %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x0 = icmp sgt <4 x i8> %a, %b
%x1 = icmp sgt <4 x i8> %c, %d
@@ -814,7 +814,7 @@
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i16:
@@ -831,7 +831,7 @@
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i16:
@@ -847,7 +847,7 @@
; AVX512F-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512F-NEXT: vpcmpgtd %xmm3, %xmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4i16:
@@ -863,7 +863,7 @@
; AVX512BW-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtd %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x0 = icmp sgt <4 x i16> %a, %b
%x1 = icmp sgt <4 x i16> %c, %d
@@ -888,7 +888,7 @@
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i8:
@@ -906,7 +906,7 @@
; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8i8:
@@ -926,7 +926,7 @@
; AVX512F-NEXT: vpmovsxwd %xmm2, %ymm0
; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -943,7 +943,7 @@
; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtw %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x0 = icmp sgt <8 x i8> %a, %b
%x1 = icmp sgt <8 x i8> %c, %d
diff --git a/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll b/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
index e5f058c..4a1672a 100644
--- a/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
+++ b/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
@@ -54,7 +54,7 @@
; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
; SSE2-SSSE3-NEXT: andps %xmm0, %xmm2
; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v4i64:
@@ -71,7 +71,7 @@
; AVX1-NEXT: vpackssdw %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskps %xmm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -85,7 +85,7 @@
; AVX2-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskps %xmm0, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -94,7 +94,7 @@
; AVX512F-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
; AVX512F-NEXT: vpcmpgtq %ymm3, %ymm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -103,7 +103,7 @@
; AVX512BW-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
; AVX512BW-NEXT: vpcmpgtq %ymm3, %ymm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = icmp sgt <4 x i64> %a, %b
@@ -124,7 +124,7 @@
; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2]
; SSE2-SSSE3-NEXT: andps %xmm2, %xmm6
; SSE2-SSSE3-NEXT: movmskps %xmm6, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f64:
@@ -137,7 +137,7 @@
; AVX12-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
@@ -146,7 +146,7 @@
; AVX512F-NEXT: vcmpltpd %ymm0, %ymm1, %k1
; AVX512F-NEXT: vcmpltpd %ymm2, %ymm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -155,7 +155,7 @@
; AVX512BW-NEXT: vcmpltpd %ymm0, %ymm1, %k1
; AVX512BW-NEXT: vcmpltpd %ymm2, %ymm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = fcmp ogt <4 x double> %a, %b
@@ -176,7 +176,7 @@
; SSE2-SSSE3-NEXT: packsswb %xmm5, %xmm4
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm4
; SSE2-SSSE3-NEXT: pmovmskb %xmm4, %eax
-; SSE2-SSSE3-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v16i16:
@@ -193,7 +193,7 @@
; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -207,7 +207,7 @@
; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -220,7 +220,7 @@
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -229,7 +229,7 @@
; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %k1
; AVX512BW-NEXT: vpcmpgtw %ymm3, %ymm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = icmp sgt <16 x i16> %a, %b
@@ -251,7 +251,7 @@
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm4
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm4
; SSE2-SSSE3-NEXT: pmovmskb %xmm4, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v8i32:
@@ -269,7 +269,7 @@
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -284,7 +284,7 @@
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -293,7 +293,7 @@
; AVX512F-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
; AVX512F-NEXT: vpcmpgtd %ymm3, %ymm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -302,7 +302,7 @@
; AVX512BW-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
; AVX512BW-NEXT: vpcmpgtd %ymm3, %ymm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = icmp sgt <8 x i32> %a, %b
@@ -324,7 +324,7 @@
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm6
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm6
; SSE2-SSSE3-NEXT: pmovmskb %xmm6, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8f32:
@@ -338,7 +338,7 @@
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
@@ -347,7 +347,7 @@
; AVX512F-NEXT: vcmpltps %ymm0, %ymm1, %k1
; AVX512F-NEXT: vcmpltps %ymm2, %ymm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -356,7 +356,7 @@
; AVX512BW-NEXT: vcmpltps %ymm0, %ymm1, %k1
; AVX512BW-NEXT: vcmpltps %ymm2, %ymm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = fcmp ogt <8 x float> %a, %b
diff --git a/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll b/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll
index 7971130..78d4700 100644
--- a/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll
+++ b/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll
@@ -29,7 +29,7 @@
; SSE-NEXT: pand %xmm0, %xmm8
; SSE-NEXT: packsswb %xmm0, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %eax
-; SSE-NEXT: # kill: def %al killed %al killed %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: v8i64:
@@ -64,7 +64,7 @@
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -92,7 +92,7 @@
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -101,7 +101,7 @@
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -110,7 +110,7 @@
; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = icmp sgt <8 x i64> %a, %b
@@ -144,7 +144,7 @@
; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: packsswb %xmm0, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %eax
-; SSE-NEXT: # kill: def %al killed %al killed %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
; SSE-NEXT: retq
;
; AVX12-LABEL: v8f64:
@@ -171,7 +171,7 @@
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
@@ -180,7 +180,7 @@
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512F-NEXT: vcmpltpd %zmm2, %zmm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -189,7 +189,7 @@
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512BW-NEXT: vcmpltpd %zmm2, %zmm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = fcmp ogt <8 x double> %a, %b
@@ -336,7 +336,7 @@
; SSE-NEXT: packsswb %xmm10, %xmm8
; SSE-NEXT: pand %xmm0, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: v16i32:
@@ -365,7 +365,7 @@
; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -387,7 +387,7 @@
; AVX2-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -396,7 +396,7 @@
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpgtd %zmm3, %zmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -405,7 +405,7 @@
; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = icmp sgt <16 x i32> %a, %b
@@ -438,7 +438,7 @@
; SSE-NEXT: packsswb %xmm10, %xmm8
; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX12-LABEL: v16f32:
@@ -459,7 +459,7 @@
; AVX12-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX12-NEXT: # kill: def $ax killed $ax killed $eax
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
@@ -468,7 +468,7 @@
; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k1
; AVX512F-NEXT: vcmpltps %zmm2, %zmm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -477,7 +477,7 @@
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k1
; AVX512BW-NEXT: vcmpltps %zmm2, %zmm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = fcmp ogt <16 x float> %a, %b
diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
index a5ef66e..74c48e3 100644
--- a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
+++ b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
@@ -13,7 +13,7 @@
define <2 x i64> @ext_i2_2i64(i2 %a0) {
; SSE2-SSSE3-LABEL: ext_i2_2i64:
; SSE2-SSSE3: # %bb.0:
-; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
@@ -25,7 +25,7 @@
;
; AVX1-LABEL: ext_i2_2i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX1-NEXT: # kill: def $edi killed $edi def $rdi
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
@@ -35,7 +35,7 @@
;
; AVX2-LABEL: ext_i2_2i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX2-NEXT: # kill: def $edi killed $edi def $rdi
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
@@ -189,7 +189,7 @@
define <4 x i64> @ext_i4_4i64(i4 %a0) {
; SSE2-SSSE3-LABEL: ext_i4_4i64:
; SSE2-SSSE3: # %bb.0:
-; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
@@ -207,7 +207,7 @@
;
; AVX1-LABEL: ext_i4_4i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX1-NEXT: # kill: def $edi killed $edi def $rdi
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -224,7 +224,7 @@
;
; AVX2-LABEL: ext_i4_4i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX2-NEXT: # kill: def $edi killed $edi def $rdi
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8]
@@ -423,7 +423,7 @@
define <8 x i64> @ext_i8_8i64(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i64:
; SSE2-SSSE3: # %bb.0:
-; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
@@ -453,7 +453,7 @@
;
; AVX1-LABEL: ext_i8_8i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX1-NEXT: # kill: def $edi killed $edi def $rdi
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
@@ -477,7 +477,7 @@
;
; AVX2-LABEL: ext_i8_8i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX2-NEXT: # kill: def $edi killed $edi def $rdi
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [1,2,4,8]
diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
index 76f3280..c694cf2 100644
--- a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
+++ b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
@@ -14,7 +14,7 @@
define <2 x i64> @ext_i2_2i64(i2 %a0) {
; SSE2-SSSE3-LABEL: ext_i2_2i64:
; SSE2-SSSE3: # %bb.0:
-; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
@@ -27,7 +27,7 @@
;
; AVX1-LABEL: ext_i2_2i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX1-NEXT: # kill: def $edi killed $edi def $rdi
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
@@ -38,7 +38,7 @@
;
; AVX2-LABEL: ext_i2_2i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX2-NEXT: # kill: def $edi killed $edi def $rdi
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
@@ -51,7 +51,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -100,7 +100,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -152,7 +152,7 @@
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -238,7 +238,7 @@
define <4 x i64> @ext_i4_4i64(i4 %a0) {
; SSE2-SSSE3-LABEL: ext_i4_4i64:
; SSE2-SSSE3: # %bb.0:
-; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
@@ -258,7 +258,7 @@
;
; AVX1-LABEL: ext_i4_4i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX1-NEXT: # kill: def $edi killed $edi def $rdi
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -277,7 +277,7 @@
;
; AVX2-LABEL: ext_i4_4i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX2-NEXT: # kill: def $edi killed $edi def $rdi
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8]
@@ -290,7 +290,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i4_4i64:
@@ -351,7 +351,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i8_8i32:
@@ -533,7 +533,7 @@
define <8 x i64> @ext_i8_8i64(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i64:
; SSE2-SSSE3: # %bb.0:
-; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
@@ -567,7 +567,7 @@
;
; AVX1-LABEL: ext_i8_8i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX1-NEXT: # kill: def $edi killed $edi def $rdi
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
@@ -595,7 +595,7 @@
;
; AVX2-LABEL: ext_i8_8i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX2-NEXT: # kill: def $edi killed $edi def $rdi
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [1,2,4,8]
diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
index 8af95df..c1b1140 100644
--- a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
+++ b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
@@ -8,7 +8,7 @@
define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
; SSE2-SSSE3-LABEL: bitcast_i2_2i1:
; SSE2-SSSE3: # %bb.0:
-; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
@@ -21,7 +21,7 @@
;
; AVX1-LABEL: bitcast_i2_2i1:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX1-NEXT: # kill: def $edi killed $edi def $rdi
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
@@ -32,7 +32,7 @@
;
; AVX2-LABEL: bitcast_i2_2i1:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX2-NEXT: # kill: def $edi killed $edi def $rdi
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector.ll
index 64fcf3d..1a04fef 100644
--- a/llvm/test/CodeGen/X86/bitcast-int-to-vector.ll
+++ b/llvm/test/CodeGen/X86/bitcast-int-to-vector.ll
@@ -10,7 +10,7 @@
; X86-NEXT: flds {{[0-9]+}}(%esp)
; X86-NEXT: fucompp
; X86-NEXT: fnstsw %ax
-; X86-NEXT: # kill: def %ah killed %ah killed %ax
+; X86-NEXT: # kill: def $ah killed $ah killed $ax
; X86-NEXT: sahf
; X86-NEXT: setp %al
; X86-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/bitcast-setcc-128.ll b/llvm/test/CodeGen/X86/bitcast-setcc-128.ll
index a96c1a3..26ba69a 100644
--- a/llvm/test/CodeGen/X86/bitcast-setcc-128.ll
+++ b/llvm/test/CodeGen/X86/bitcast-setcc-128.ll
@@ -12,7 +12,7 @@
; SSE2-SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i16:
@@ -20,7 +20,7 @@
; AVX12-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8i16:
@@ -29,7 +29,7 @@
; AVX512F-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -37,7 +37,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x = icmp sgt <8 x i16> %a, %b
%res = bitcast <8 x i1> %x to i8
@@ -49,28 +49,28 @@
; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i32:
; AVX12: # %bb.0:
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x = icmp sgt <4 x i32> %a, %b
%res = bitcast <4 x i1> %x to i4
@@ -82,28 +82,28 @@
; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: cmpltps %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskps %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f32:
; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4f32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %xmm0, %xmm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4f32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %xmm0, %xmm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x = fcmp ogt <4 x float> %a, %b
%res = bitcast <4 x i1> %x to i4
@@ -115,14 +115,14 @@
; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v16i8:
; AVX12: # %bb.0:
; AVX12-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX12-NEXT: # kill: def $ax killed $ax killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v16i8:
@@ -131,7 +131,7 @@
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -139,7 +139,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
%x = icmp sgt <16 x i8> %a, %b
%res = bitcast <16 x i1> %x to i16
@@ -175,7 +175,7 @@
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i8:
@@ -192,7 +192,7 @@
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i8:
@@ -209,7 +209,7 @@
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i8:
@@ -220,7 +220,7 @@
; AVX512F-NEXT: vpsraq $56, %xmm0, %xmm0
; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2i8:
@@ -231,7 +231,7 @@
; AVX512BW-NEXT: vpsraq $56, %xmm0, %xmm0
; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x = icmp sgt <2 x i8> %a, %b
%res = bitcast <2 x i1> %x to i2
@@ -267,7 +267,7 @@
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i16:
@@ -284,7 +284,7 @@
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i16:
@@ -301,7 +301,7 @@
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i16:
@@ -312,7 +312,7 @@
; AVX512F-NEXT: vpsraq $48, %xmm0, %xmm0
; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2i16:
@@ -323,7 +323,7 @@
; AVX512BW-NEXT: vpsraq $48, %xmm0, %xmm0
; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x = icmp sgt <2 x i16> %a, %b
%res = bitcast <2 x i1> %x to i2
@@ -355,7 +355,7 @@
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i32:
@@ -370,7 +370,7 @@
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i32:
@@ -385,7 +385,7 @@
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i32:
@@ -396,7 +396,7 @@
; AVX512F-NEXT: vpsraq $32, %xmm0, %xmm0
; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2i32:
@@ -407,7 +407,7 @@
; AVX512BW-NEXT: vpsraq $32, %xmm0, %xmm0
; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x = icmp sgt <2 x i32> %a, %b
%res = bitcast <2 x i1> %x to i2
@@ -429,28 +429,28 @@
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2i64:
; AVX12: # %bb.0:
; AVX12-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v2i64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x = icmp sgt <2 x i64> %a, %b
%res = bitcast <2 x i1> %x to i2
@@ -462,28 +462,28 @@
; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2f64:
; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v2f64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %xmm0, %xmm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v2f64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %xmm0, %xmm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x = fcmp ogt <2 x double> %a, %b
%res = bitcast <2 x i1> %x to i2
@@ -499,7 +499,7 @@
; SSE2-SSSE3-NEXT: psrad $24, %xmm0
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i8:
@@ -510,7 +510,7 @@
; AVX12-NEXT: vpsrad $24, %xmm0, %xmm0
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i8:
@@ -521,7 +521,7 @@
; AVX512F-NEXT: vpsrad $24, %xmm0, %xmm0
; AVX512F-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4i8:
@@ -532,7 +532,7 @@
; AVX512BW-NEXT: vpsrad $24, %xmm0, %xmm0
; AVX512BW-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x = icmp sgt <4 x i8> %a, %b
%res = bitcast <4 x i1> %x to i4
@@ -548,7 +548,7 @@
; SSE2-SSSE3-NEXT: psrad $16, %xmm0
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i16:
@@ -559,7 +559,7 @@
; AVX12-NEXT: vpsrad $16, %xmm0, %xmm0
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i16:
@@ -570,7 +570,7 @@
; AVX512F-NEXT: vpsrad $16, %xmm0, %xmm0
; AVX512F-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: v4i16:
@@ -581,7 +581,7 @@
; AVX512BW-NEXT: vpsrad $16, %xmm0, %xmm0
; AVX512BW-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x = icmp sgt <4 x i16> %a, %b
%res = bitcast <4 x i1> %x to i4
@@ -598,7 +598,7 @@
; SSE2-SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i8:
@@ -610,7 +610,7 @@
; AVX12-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8i8:
@@ -623,7 +623,7 @@
; AVX512F-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -635,7 +635,7 @@
; AVX512BW-NEXT: vpsraw $8, %xmm0, %xmm0
; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
%x = icmp sgt <8 x i8> %a, %b
%res = bitcast <8 x i1> %x to i8
diff --git a/llvm/test/CodeGen/X86/bitcast-setcc-256.ll b/llvm/test/CodeGen/X86/bitcast-setcc-256.ll
index d4151c9..9bfd434 100644
--- a/llvm/test/CodeGen/X86/bitcast-setcc-256.ll
+++ b/llvm/test/CodeGen/X86/bitcast-setcc-256.ll
@@ -13,7 +13,7 @@
; SSE2-SSSE3-NEXT: pcmpgtw %xmm2, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v16i16:
@@ -24,7 +24,7 @@
; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -34,7 +34,7 @@
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -44,7 +44,7 @@
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -52,7 +52,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = icmp sgt <16 x i16> %a, %b
@@ -68,7 +68,7 @@
; SSE2-SSSE3-NEXT: packssdw %xmm1, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v8i32:
@@ -79,7 +79,7 @@
; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovmskps %ymm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -87,7 +87,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -95,7 +95,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -103,7 +103,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = icmp sgt <8 x i32> %a, %b
@@ -119,14 +119,14 @@
; SSE2-SSSE3-NEXT: packssdw %xmm3, %xmm2
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm2
; SSE2-SSSE3-NEXT: pmovmskb %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8f32:
; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX12-NEXT: vmovmskps %ymm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
@@ -134,7 +134,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %ymm0, %ymm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -142,7 +142,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %ymm0, %ymm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = fcmp ogt <8 x float> %a, %b
@@ -233,7 +233,7 @@
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: packssdw %xmm3, %xmm1
; SSE2-SSSE3-NEXT: movmskps %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v4i64:
@@ -244,7 +244,7 @@
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovmskpd %ymm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -252,7 +252,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -260,7 +260,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -268,7 +268,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = icmp sgt <4 x i64> %a, %b
@@ -283,14 +283,14 @@
; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm2
; SSE2-SSSE3-NEXT: packssdw %xmm3, %xmm2
; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-SSSE3-NEXT: # kill: def $al killed $al killed $eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f64:
; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX12-NEXT: vmovmskpd %ymm0, %eax
-; AVX12-NEXT: # kill: def %al killed %al killed %eax
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
@@ -298,7 +298,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %ymm0, %ymm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -306,7 +306,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %ymm0, %ymm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = fcmp ogt <4 x double> %a, %b
diff --git a/llvm/test/CodeGen/X86/bitcast-setcc-512.ll b/llvm/test/CodeGen/X86/bitcast-setcc-512.ll
index 313167b..bafc8ec 100644
--- a/llvm/test/CodeGen/X86/bitcast-setcc-512.ll
+++ b/llvm/test/CodeGen/X86/bitcast-setcc-512.ll
@@ -86,7 +86,7 @@
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packsswb %xmm2, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: v16i32:
@@ -103,7 +103,7 @@
; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -116,7 +116,7 @@
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -124,7 +124,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -132,7 +132,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = icmp sgt <16 x i32> %a, %b
@@ -151,7 +151,7 @@
; SSE-NEXT: packssdw %xmm5, %xmm4
; SSE-NEXT: packsswb %xmm6, %xmm4
; SSE-NEXT: pmovmskb %xmm4, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: v16f32:
@@ -164,7 +164,7 @@
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -177,7 +177,7 @@
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -185,7 +185,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -193,7 +193,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = fcmp ogt <16 x float> %a, %b
@@ -1046,7 +1046,7 @@
; SSE-NEXT: packssdw %xmm2, %xmm0
; SSE-NEXT: packsswb %xmm0, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: # kill: def %al killed %al killed %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: v8i64:
@@ -1063,7 +1063,7 @@
; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovmskps %ymm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1074,7 +1074,7 @@
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vmovmskps %ymm0, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1082,7 +1082,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1090,7 +1090,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = icmp sgt <8 x i64> %a, %b
@@ -1110,7 +1110,7 @@
; SSE-NEXT: packssdw %xmm6, %xmm4
; SSE-NEXT: packsswb %xmm0, %xmm4
; SSE-NEXT: pmovmskb %xmm4, %eax
-; SSE-NEXT: # kill: def %al killed %al killed %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: v8f64:
@@ -1123,7 +1123,7 @@
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovmskps %ymm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1134,7 +1134,7 @@
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vmovmskps %ymm0, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1142,7 +1142,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1150,7 +1150,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = fcmp ogt <8 x double> %a, %b
diff --git a/llvm/test/CodeGen/X86/bitreverse.ll b/llvm/test/CodeGen/X86/bitreverse.ll
index a393db3..aeca4c3 100644
--- a/llvm/test/CodeGen/X86/bitreverse.ll
+++ b/llvm/test/CodeGen/X86/bitreverse.ll
@@ -46,8 +46,8 @@
; X86-NEXT: andl $43690, %ecx # imm = 0xAAAA
; X86-NEXT: shrl %ecx
; X86-NEXT: leal (%ecx,%edx,2), %edx
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
-; X86-NEXT: # kill: def %dx killed %dx killed %edx
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NEXT: # kill: def $dx killed $dx killed $edx
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_v2i16:
@@ -191,7 +191,7 @@
;
; X64-LABEL: test_bitreverse_i32:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: bswapl %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
@@ -242,7 +242,7 @@
;
; X64-LABEL: test_bitreverse_i24:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: bswapl %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
@@ -289,12 +289,12 @@
; X86-NEXT: andl $43690, %eax # imm = 0xAAAA
; X86-NEXT: shrl %eax
; X86-NEXT: leal (%eax,%ecx,2), %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_i16:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: rolw $8, %di
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $3855, %eax # imm = 0xF0F
@@ -312,7 +312,7 @@
; X64-NEXT: andl $43690, %eax # imm = 0xAAAA
; X64-NEXT: shrl %eax
; X64-NEXT: leal (%rax,%rcx,2), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%b = call i16 @llvm.bitreverse.i16(i16 %a)
ret i16 %b
diff --git a/llvm/test/CodeGen/X86/block-placement.mir b/llvm/test/CodeGen/X86/block-placement.mir
index 600bc13..bd8f446 100644
--- a/llvm/test/CodeGen/X86/block-placement.mir
+++ b/llvm/test/CodeGen/X86/block-placement.mir
@@ -43,10 +43,10 @@
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%esi' }
+ - { reg: '$rdi' }
+ - { reg: '$esi' }
-# CHECK: %eax = FAULTING_OP 1, %bb.3, 1684, killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
+# CHECK: $eax = FAULTING_OP 1, %bb.3, 1684, killed $rdi, 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr)
# CHECK-NEXT: JMP_1 %bb.2
# CHECK: bb.3.null:
# CHECK: bb.4.right:
@@ -55,33 +55,33 @@
body: |
bb.0.entry:
successors: %bb.1(0x7ffff800), %bb.3(0x00000800)
- liveins: %esi, %rdi
+ liveins: $esi, $rdi
- frame-setup PUSH64r undef %rax, implicit-def %rsp, implicit %rsp
+ frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp
CFI_INSTRUCTION def_cfa_offset 16
- TEST8ri %sil, 1, implicit-def %eflags, implicit killed %esi
- JE_1 %bb.3, implicit killed %eflags
+ TEST8ri $sil, 1, implicit-def $eflags, implicit killed $esi
+ JE_1 %bb.3, implicit killed $eflags
bb.1.left:
successors: %bb.2(0x7ffff800), %bb.4(0x00000800)
- liveins: %rdi
+ liveins: $rdi
- %eax = FAULTING_OP 1, %bb.2, 1684, killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
+ $eax = FAULTING_OP 1, %bb.2, 1684, killed $rdi, 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr)
JMP_1 %bb.4
bb.4.not_null:
- liveins: %rdi, %eax
+ liveins: $rdi, $eax
- %rcx = POP64r implicit-def %rsp, implicit %rsp
- RETQ %eax
+ $rcx = POP64r implicit-def $rsp, implicit $rsp
+ RETQ $eax
bb.2.null:
- liveins: %rdi
+ liveins: $rdi
- CALL64pcrel32 @stub, csr_64, implicit %rsp, implicit %rdi, implicit-def %rsp
+ CALL64pcrel32 @stub, csr_64, implicit $rsp, implicit $rdi, implicit-def $rsp
bb.3.right:
- dead %edi = XOR32rr undef %edi, undef %edi, implicit-def dead %eflags, implicit-def %rdi
- CALL64pcrel32 @stub, csr_64, implicit %rsp, implicit %rdi, implicit-def %rsp
+ dead $edi = XOR32rr undef $edi, undef $edi, implicit-def dead $eflags, implicit-def $rdi
+ CALL64pcrel32 @stub, csr_64, implicit $rsp, implicit $rdi, implicit-def $rsp
...
diff --git a/llvm/test/CodeGen/X86/bmi-schedule.ll b/llvm/test/CodeGen/X86/bmi-schedule.ll
index 8d41a51..737f873 100644
--- a/llvm/test/CodeGen/X86/bmi-schedule.ll
+++ b/llvm/test/CodeGen/X86/bmi-schedule.ll
@@ -14,7 +14,7 @@
; GENERIC-NEXT: notl %edi # sched: [1:0.33]
; GENERIC-NEXT: andw (%rdx), %di # sched: [6:0.50]
; GENERIC-NEXT: addl %edi, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andn_i16:
@@ -23,7 +23,7 @@
; HASWELL-NEXT: notl %edi # sched: [1:0.25]
; HASWELL-NEXT: andw (%rdx), %di # sched: [6:0.50]
; HASWELL-NEXT: addl %edi, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; HASWELL-NEXT: # kill: def $ax killed $ax killed $eax
; HASWELL-NEXT: retq # sched: [7:1.00]
;
; BROADWELL-LABEL: test_andn_i16:
@@ -32,7 +32,7 @@
; BROADWELL-NEXT: notl %edi # sched: [1:0.25]
; BROADWELL-NEXT: andw (%rdx), %di # sched: [6:0.50]
; BROADWELL-NEXT: addl %edi, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; BROADWELL-NEXT: # kill: def $ax killed $ax killed $eax
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_andn_i16:
@@ -41,7 +41,7 @@
; SKYLAKE-NEXT: notl %edi # sched: [1:0.25]
; SKYLAKE-NEXT: andw (%rdx), %di # sched: [6:0.50]
; SKYLAKE-NEXT: addl %edi, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
+; SKYLAKE-NEXT: # kill: def $ax killed $ax killed $eax
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_andn_i16:
@@ -50,7 +50,7 @@
; BTVER2-NEXT: notl %edi # sched: [1:0.50]
; BTVER2-NEXT: andw (%rdx), %di # sched: [4:1.00]
; BTVER2-NEXT: addl %edi, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
+; BTVER2-NEXT: # kill: def $ax killed $ax killed $eax
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andn_i16:
@@ -59,7 +59,7 @@
; ZNVER1-NEXT: notl %edi # sched: [1:0.25]
; ZNVER1-NEXT: andw (%rdx), %di # sched: [5:0.50]
; ZNVER1-NEXT: addl %edi, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
+; ZNVER1-NEXT: # kill: def $ax killed $ax killed $eax
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a2
%2 = xor i16 %a0, -1
@@ -581,7 +581,7 @@
; GENERIC-NEXT: tzcntw (%rsi), %cx # sched: [7:1.00]
; GENERIC-NEXT: tzcntw %di, %ax # sched: [3:1.00]
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cttz_i16:
@@ -589,7 +589,7 @@
; HASWELL-NEXT: tzcntw (%rsi), %cx # sched: [8:1.00]
; HASWELL-NEXT: tzcntw %di, %ax # sched: [3:1.00]
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; HASWELL-NEXT: # kill: def $ax killed $ax killed $eax
; HASWELL-NEXT: retq # sched: [7:1.00]
;
; BROADWELL-LABEL: test_cttz_i16:
@@ -597,7 +597,7 @@
; BROADWELL-NEXT: tzcntw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: tzcntw %di, %ax # sched: [3:1.00]
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; BROADWELL-NEXT: # kill: def $ax killed $ax killed $eax
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cttz_i16:
@@ -605,7 +605,7 @@
; SKYLAKE-NEXT: tzcntw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: tzcntw %di, %ax # sched: [3:1.00]
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
+; SKYLAKE-NEXT: # kill: def $ax killed $ax killed $eax
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cttz_i16:
@@ -613,7 +613,7 @@
; BTVER2-NEXT: tzcntw (%rsi), %cx # sched: [6:1.00]
; BTVER2-NEXT: tzcntw %di, %ax # sched: [3:1.00]
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
+; BTVER2-NEXT: # kill: def $ax killed $ax killed $eax
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cttz_i16:
@@ -621,7 +621,7 @@
; ZNVER1-NEXT: tzcntw (%rsi), %cx # sched: [6:0.50]
; ZNVER1-NEXT: tzcntw %di, %ax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
+; ZNVER1-NEXT: # kill: def $ax killed $ax killed $eax
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a1
%2 = tail call i16 @llvm.cttz.i16( i16 %1, i1 false )
diff --git a/llvm/test/CodeGen/X86/bmi.ll b/llvm/test/CodeGen/X86/bmi.ll
index be130a6..4da10f6 100644
--- a/llvm/test/CodeGen/X86/bmi.ll
+++ b/llvm/test/CodeGen/X86/bmi.ll
@@ -13,7 +13,7 @@
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: orl $256, %eax # imm = 0x100
; CHECK-NEXT: tzcntl %eax, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%tmp = tail call i8 @llvm.cttz.i8( i8 %x, i1 false )
ret i8 %tmp
@@ -61,7 +61,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: tzcntl %eax, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%tmp = tail call i8 @llvm.cttz.i8( i8 %x, i1 true )
ret i8 %tmp
@@ -516,7 +516,7 @@
; BMI1-NEXT: movl $32, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: movl $-1, %eax
-; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
; BMI1-NEXT: shrl %cl, %eax
; BMI1-NEXT: andl %edi, %eax
; BMI1-NEXT: retq
@@ -538,7 +538,7 @@
; BMI1-NEXT: movl $32, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: shll %cl, %edi
-; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
; BMI1-NEXT: shrl %cl, %edi
; BMI1-NEXT: movl %edi, %eax
; BMI1-NEXT: retq
@@ -566,7 +566,7 @@
;
; BMI2-LABEL: bzhi64b:
; BMI2: # %bb.0: # %entry
-; BMI2-NEXT: # kill: def %esi killed %esi def %rsi
+; BMI2-NEXT: # kill: def $esi killed $esi def $rsi
; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
; BMI2-NEXT: retq
entry:
@@ -583,7 +583,7 @@
; BMI1-NEXT: movl $64, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: movq $-1, %rax
-; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
; BMI1-NEXT: shrq %cl, %rax
; BMI1-NEXT: andq %rdi, %rax
; BMI1-NEXT: retq
@@ -605,14 +605,14 @@
; BMI1-NEXT: movl $64, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: movq $-1, %rax
-; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
; BMI1-NEXT: shrq %cl, %rax
; BMI1-NEXT: andq %rdi, %rax
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi64d:
; BMI2: # %bb.0: # %entry
-; BMI2-NEXT: # kill: def %esi killed %esi def %rsi
+; BMI2-NEXT: # kill: def $esi killed $esi def $rsi
; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
; BMI2-NEXT: retq
entry:
@@ -629,7 +629,7 @@
; BMI1-NEXT: movl $64, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: shlq %cl, %rdi
-; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
; BMI1-NEXT: shrq %cl, %rdi
; BMI1-NEXT: movq %rdi, %rax
; BMI1-NEXT: retq
@@ -651,14 +651,14 @@
; BMI1-NEXT: movl $64, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: shlq %cl, %rdi
-; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
; BMI1-NEXT: shrq %cl, %rdi
; BMI1-NEXT: movq %rdi, %rax
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi64f:
; BMI2: # %bb.0: # %entry
-; BMI2-NEXT: # kill: def %esi killed %esi def %rsi
+; BMI2-NEXT: # kill: def $esi killed $esi def $rsi
; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
; BMI2-NEXT: retq
entry:
diff --git a/llvm/test/CodeGen/X86/bool-simplify.ll b/llvm/test/CodeGen/X86/bool-simplify.ll
index 87929ad..60931a7 100644
--- a/llvm/test/CodeGen/X86/bool-simplify.ll
+++ b/llvm/test/CodeGen/X86/bool-simplify.ll
@@ -55,7 +55,7 @@
; CHECK-NEXT: rdrandw %cx
; CHECK-NEXT: cmovbw %di, %ax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%1 = tail call { i16, i32 } @llvm.x86.rdrand.16() nounwind
%2 = extractvalue { i16, i32 } %1, 0
@@ -107,7 +107,7 @@
; CHECK-NEXT: rdseedw %cx
; CHECK-NEXT: cmovbw %di, %ax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%1 = tail call { i16, i32 } @llvm.x86.rdseed.16() nounwind
%2 = extractvalue { i16, i32 } %1, 0
diff --git a/llvm/test/CodeGen/X86/bool-vector.ll b/llvm/test/CodeGen/X86/bool-vector.ll
index ec9e42fc..88fe0a7 100644
--- a/llvm/test/CodeGen/X86/bool-vector.ll
+++ b/llvm/test/CodeGen/X86/bool-vector.ll
@@ -138,10 +138,10 @@
;
; X64-LABEL: PR15215_good:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: def %ecx killed %ecx def %rcx
-; X64-NEXT: # kill: def %edx killed %edx def %rdx
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $ecx killed $ecx def $rcx
+; X64-NEXT: # kill: def $edx killed $edx def $rdx
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: andl $1, %edi
; X64-NEXT: andl $1, %esi
; X64-NEXT: andl $1, %edx
diff --git a/llvm/test/CodeGen/X86/branchfolding-undef.mir b/llvm/test/CodeGen/X86/branchfolding-undef.mir
index 1062b34..d6fb427 100644
--- a/llvm/test/CodeGen/X86/branchfolding-undef.mir
+++ b/llvm/test/CodeGen/X86/branchfolding-undef.mir
@@ -7,22 +7,22 @@
---
# CHECK-LABEL: name: func
# CHECK: bb.1:
-# CHECK: %eax = MOV32ri 2
+# CHECK: $eax = MOV32ri 2
# CHECK-NOT: RET
# CHECK: bb.2:
-# CHECK-NOT: RET 0, undef %eax
-# CHECK: RET 0, %eax
+# CHECK-NOT: RET 0, undef $eax
+# CHECK: RET 0, $eax
name: func
tracksRegLiveness: true
body: |
bb.0:
- JE_1 %bb.1, implicit undef %eflags
+ JE_1 %bb.1, implicit undef $eflags
JMP_1 %bb.2
bb.1:
- %eax = MOV32ri 2
- RET 0, %eax
+ $eax = MOV32ri 2
+ RET 0, $eax
bb.2:
- RET 0, undef %eax
+ RET 0, undef $eax
...
diff --git a/llvm/test/CodeGen/X86/broadcastm-lowering.ll b/llvm/test/CodeGen/X86/broadcastm-lowering.ll
index 6c236af..243ff14 100644
--- a/llvm/test/CodeGen/X86/broadcastm-lowering.ll
+++ b/llvm/test/CodeGen/X86/broadcastm-lowering.ll
@@ -104,8 +104,8 @@
define <8 x i64> @test_mm512_epi64(<8 x i32> %a, <8 x i32> %b) {
; AVX512CD-LABEL: test_mm512_epi64:
; AVX512CD: # %bb.0: # %entry
-; AVX512CD-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512CD-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512CD-NEXT: vpbroadcastmb2q %k0, %zmm0
; AVX512CD-NEXT: retq
@@ -136,8 +136,8 @@
define <4 x i64> @test_mm256_epi64(<8 x i32> %a, <8 x i32> %b) {
; AVX512CD-LABEL: test_mm256_epi64:
; AVX512CD: # %bb.0: # %entry
-; AVX512CD-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512CD-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512CD-NEXT: kmovw %k0, %eax
; AVX512CD-NEXT: vpxor %xmm0, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/bypass-slow-division-32.ll b/llvm/test/CodeGen/X86/bypass-slow-division-32.ll
index c5a5191..6ab6222 100644
--- a/llvm/test/CodeGen/X86/bypass-slow-division-32.ll
+++ b/llvm/test/CodeGen/X86/bypass-slow-division-32.ll
@@ -17,7 +17,7 @@
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB0_1:
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
; CHECK-NEXT: divb %cl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: retl
@@ -41,7 +41,7 @@
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB1_1:
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
; CHECK-NEXT: divb %cl
; CHECK-NEXT: movzbl %ah, %eax
; CHECK-NEXT: retl
@@ -65,7 +65,7 @@
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB2_1:
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
; CHECK-NEXT: divb %cl
; CHECK-NEXT: movzbl %ah, %edx
; CHECK-NEXT: movzbl %al, %eax
@@ -103,14 +103,14 @@
; CHECK-NEXT: jmp .LBB3_6
; CHECK-NEXT: .LBB3_1:
; CHECK-NEXT: movzbl %cl, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
; CHECK-NEXT: divb %bl
; CHECK-NEXT: movzbl %al, %esi
; CHECK-NEXT: testl $-256, %edi
; CHECK-NEXT: jne .LBB3_5
; CHECK-NEXT: .LBB3_4:
; CHECK-NEXT: movzbl %cl, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
; CHECK-NEXT: divb %bl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: .LBB3_6:
@@ -208,7 +208,7 @@
; CHECK-NEXT: .LBB8_1:
; CHECK-NEXT: movb $4, %al
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
; CHECK-NEXT: divb %cl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: retl
@@ -230,7 +230,7 @@
; CHECK-NEXT: .LBB9_1:
; CHECK-NEXT: movb $4, %al
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
; CHECK-NEXT: divb %cl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/bypass-slow-division-64.ll b/llvm/test/CodeGen/X86/bypass-slow-division-64.ll
index cf5cd70..bed775d 100644
--- a/llvm/test/CodeGen/X86/bypass-slow-division-64.ll
+++ b/llvm/test/CodeGen/X86/bypass-slow-division-64.ll
@@ -20,7 +20,7 @@
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: divl %esi
-; CHECK-NEXT: # kill: def %eax killed %eax def %rax
+; CHECK-NEXT: # kill: def $eax killed $eax def $rax
; CHECK-NEXT: retq
%result = sdiv i64 %a, %b
ret i64 %result
@@ -43,7 +43,7 @@
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: divl %esi
-; CHECK-NEXT: # kill: def %edx killed %edx def %rdx
+; CHECK-NEXT: # kill: def $edx killed $edx def $rdx
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: retq
%result = srem i64 %a, %b
@@ -67,8 +67,8 @@
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: divl %esi
-; CHECK-NEXT: # kill: def %edx killed %edx def %rdx
-; CHECK-NEXT: # kill: def %eax killed %eax def %rax
+; CHECK-NEXT: # kill: def $edx killed $edx def $rdx
+; CHECK-NEXT: # kill: def $eax killed $eax def $rax
; CHECK-NEXT: addq %rdx, %rax
; CHECK-NEXT: retq
%resultdiv = sdiv i64 %a, %b
diff --git a/llvm/test/CodeGen/X86/clz.ll b/llvm/test/CodeGen/X86/clz.ll
index bd63a80..d76741c 100644
--- a/llvm/test/CodeGen/X86/clz.ll
+++ b/llvm/test/CodeGen/X86/clz.ll
@@ -19,28 +19,28 @@
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; X64-LABEL: cttz_i8:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsfl %eax, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i8:
; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i8:
; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: tzcntl %eax, %eax
-; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X64-CLZ-NEXT: # kill: def $al killed $al killed $eax
; X64-CLZ-NEXT: retq
%tmp = call i8 @llvm.cttz.i8( i8 %x, i1 true )
ret i8 %tmp
@@ -144,7 +144,7 @@
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: bsrl %eax, %eax
; X32-NEXT: xorl $7, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i8:
@@ -152,7 +152,7 @@
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsrl %eax, %eax
; X64-NEXT: xorl $7, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i8:
@@ -160,7 +160,7 @@
; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: lzcntl %eax, %eax
; X32-CLZ-NEXT: addl $-24, %eax
-; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i8:
@@ -168,7 +168,7 @@
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: lzcntl %eax, %eax
; X64-CLZ-NEXT: addl $-24, %eax
-; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X64-CLZ-NEXT: # kill: def $al killed $al killed $eax
; X64-CLZ-NEXT: retq
%tmp2 = call i8 @llvm.ctlz.i8( i8 %x, i1 true )
ret i8 %tmp2
@@ -179,14 +179,14 @@
; X32: # %bb.0:
; X32-NEXT: bsrw {{[0-9]+}}(%esp), %ax
; X32-NEXT: xorl $15, %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i16:
; X64: # %bb.0:
; X64-NEXT: bsrw %di, %ax
; X64-NEXT: xorl $15, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i16:
@@ -286,11 +286,11 @@
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: bsrl %eax, %eax
; X32-NEXT: xorl $7, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
; X32-NEXT: .LBB8_1:
; X32-NEXT: movb $8, %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i8_zero_test:
@@ -301,11 +301,11 @@
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsrl %eax, %eax
; X64-NEXT: xorl $7, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
; X64-NEXT: .LBB8_1:
; X64-NEXT: movb $8, %al
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i8_zero_test:
@@ -313,7 +313,7 @@
; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: lzcntl %eax, %eax
; X32-CLZ-NEXT: addl $-24, %eax
-; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i8_zero_test:
@@ -321,7 +321,7 @@
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: lzcntl %eax, %eax
; X64-CLZ-NEXT: addl $-24, %eax
-; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X64-CLZ-NEXT: # kill: def $al killed $al killed $eax
; X64-CLZ-NEXT: retq
%tmp1 = call i8 @llvm.ctlz.i8(i8 %n, i1 false)
ret i8 %tmp1
@@ -337,11 +337,11 @@
; X32-NEXT: # %bb.2: # %cond.false
; X32-NEXT: bsrw %ax, %ax
; X32-NEXT: xorl $15, %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
; X32-NEXT: .LBB9_1:
; X32-NEXT: movw $16, %ax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i16_zero_test:
@@ -351,11 +351,11 @@
; X64-NEXT: # %bb.2: # %cond.false
; X64-NEXT: bsrw %di, %ax
; X64-NEXT: xorl $15, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
; X64-NEXT: .LBB9_1:
; X64-NEXT: movw $16, %ax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i16_zero_test:
@@ -480,11 +480,11 @@
; X32-NEXT: # %bb.2: # %cond.false
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
; X32-NEXT: .LBB12_1
; X32-NEXT: movb $8, %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; X64-LABEL: cttz_i8_zero_test:
@@ -494,11 +494,11 @@
; X64-NEXT: # %bb.2: # %cond.false
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsfl %eax, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
; X64-NEXT: .LBB12_1:
; X64-NEXT: movb $8, %al
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i8_zero_test:
@@ -506,7 +506,7 @@
; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: orl $256, %eax # imm = 0x100
; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i8_zero_test:
@@ -514,7 +514,7 @@
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: orl $256, %eax # imm = 0x100
; X64-CLZ-NEXT: tzcntl %eax, %eax
-; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X64-CLZ-NEXT: # kill: def $al killed $al killed $eax
; X64-CLZ-NEXT: retq
%tmp1 = call i8 @llvm.cttz.i8(i8 %n, i1 false)
ret i8 %tmp1
@@ -786,7 +786,7 @@
; X32-NEXT: orb $2, %al
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; X64-LABEL: cttz_i8_knownbits:
@@ -794,7 +794,7 @@
; X64-NEXT: orb $2, %dil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsfl %eax, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i8_knownbits:
@@ -803,7 +803,7 @@
; X32-CLZ-NEXT: orb $2, %al
; X32-CLZ-NEXT: movzbl %al, %eax
; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i8_knownbits:
@@ -811,7 +811,7 @@
; X64-CLZ-NEXT: orb $2, %dil
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: tzcntl %eax, %eax
-; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X64-CLZ-NEXT: # kill: def $al killed $al killed $eax
; X64-CLZ-NEXT: retq
%x2 = or i8 %x, 2
%tmp = call i8 @llvm.cttz.i8(i8 %x2, i1 true )
@@ -827,7 +827,7 @@
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: bsrl %eax, %eax
; X32-NEXT: xorl $7, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i8_knownbits:
@@ -836,7 +836,7 @@
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsrl %eax, %eax
; X64-NEXT: xorl $7, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i8_knownbits:
@@ -846,7 +846,7 @@
; X32-CLZ-NEXT: movzbl %al, %eax
; X32-CLZ-NEXT: lzcntl %eax, %eax
; X32-CLZ-NEXT: addl $-24, %eax
-; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i8_knownbits:
@@ -855,7 +855,7 @@
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: lzcntl %eax, %eax
; X64-CLZ-NEXT: addl $-24, %eax
-; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
+; X64-CLZ-NEXT: # kill: def $al killed $al killed $eax
; X64-CLZ-NEXT: retq
%x2 = or i8 %x, 64
diff --git a/llvm/test/CodeGen/X86/cmov-into-branch.ll b/llvm/test/CodeGen/X86/cmov-into-branch.ll
index c18a9ca..51c1ac5 100644
--- a/llvm/test/CodeGen/X86/cmov-into-branch.ll
+++ b/llvm/test/CodeGen/X86/cmov-into-branch.ll
@@ -65,7 +65,7 @@
define void @test6(i32 %a, i32 %x, i32* %y.ptr, i64* %z.ptr) {
; CHECK-LABEL: test6:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: cmovnsl (%rdx), %esi
; CHECK-NEXT: movq %rsi, (%rcx)
diff --git a/llvm/test/CodeGen/X86/cmov-promotion.ll b/llvm/test/CodeGen/X86/cmov-promotion.ll
index 8e34b62..78a4612 100644
--- a/llvm/test/CodeGen/X86/cmov-promotion.ll
+++ b/llvm/test/CodeGen/X86/cmov-promotion.ll
@@ -12,7 +12,7 @@
; CMOV-NEXT: movb $-19, %al
; CMOV-NEXT: .LBB0_2:
; CMOV-NEXT: movzbl %al, %eax
-; CMOV-NEXT: # kill: def %ax killed %ax killed %eax
+; CMOV-NEXT: # kill: def $ax killed $ax killed $eax
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_zpromotion_8_to_16:
@@ -24,7 +24,7 @@
; NO_CMOV-NEXT: movb $-19, %al
; NO_CMOV-NEXT: .LBB0_2:
; NO_CMOV-NEXT: movzbl %al, %eax
-; NO_CMOV-NEXT: # kill: def %ax killed %ax killed %eax
+; NO_CMOV-NEXT: # kill: def $ax killed $ax killed $eax
; NO_CMOV-NEXT: retl
%t0 = select i1 %c, i8 117, i8 -19
%ret = zext i8 %t0 to i16
@@ -167,7 +167,7 @@
; CMOV-NEXT: movb $-19, %al
; CMOV-NEXT: .LBB6_2:
; CMOV-NEXT: movsbl %al, %eax
-; CMOV-NEXT: # kill: def %ax killed %ax killed %eax
+; CMOV-NEXT: # kill: def $ax killed $ax killed $eax
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_spromotion_8_to_16:
@@ -179,7 +179,7 @@
; NO_CMOV-NEXT: movb $-19, %al
; NO_CMOV-NEXT: .LBB6_2:
; NO_CMOV-NEXT: movsbl %al, %eax
-; NO_CMOV-NEXT: # kill: def %ax killed %ax killed %eax
+; NO_CMOV-NEXT: # kill: def $ax killed $ax killed $eax
; NO_CMOV-NEXT: retl
%t0 = select i1 %c, i8 117, i8 -19
%ret = sext i8 %t0 to i16
diff --git a/llvm/test/CodeGen/X86/cmov.ll b/llvm/test/CodeGen/X86/cmov.ll
index e860a59..859078a 100644
--- a/llvm/test/CodeGen/X86/cmov.ll
+++ b/llvm/test/CodeGen/X86/cmov.ll
@@ -83,7 +83,7 @@
; CHECK-NEXT: shrb $7, %al
; CHECK-NEXT: movzbl %al, %ecx
; CHECK-NEXT: xorl $1, %ecx
-; CHECK-NEXT: # kill: def %cl killed %cl killed %ecx
+; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
; CHECK-NEXT: sarl %cl, %edx
; CHECK-NEXT: movb {{.*}}(%rip), %al
; CHECK-NEXT: testb %al, %al
diff --git a/llvm/test/CodeGen/X86/combine-abs.ll b/llvm/test/CodeGen/X86/combine-abs.ll
index dd86753..ebe7def 100644
--- a/llvm/test/CodeGen/X86/combine-abs.ll
+++ b/llvm/test/CodeGen/X86/combine-abs.ll
@@ -77,9 +77,9 @@
;
; AVX512F-LABEL: combine_v4i64_abs_abs:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpabsq %zmm0, %zmm0
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: combine_v4i64_abs_abs:
diff --git a/llvm/test/CodeGen/X86/compress_expand.ll b/llvm/test/CodeGen/X86/compress_expand.ll
index 57767e2..310926d 100644
--- a/llvm/test/CodeGen/X86/compress_expand.ll
+++ b/llvm/test/CodeGen/X86/compress_expand.ll
@@ -72,11 +72,11 @@
;
; KNL-LABEL: test4:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: movw $7, %ax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
%res = call <4 x float> @llvm.masked.expandload.v4f32(float* %base, <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x float> %src0)
ret <4 x float>%res
@@ -92,11 +92,11 @@
;
; KNL-LABEL: test5:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: movb $2, %al
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpexpandq (%rdi), %zmm0 {%k1}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
%res = call <2 x i64> @llvm.masked.expandload.v2i64(i64* %base, <2 x i1> <i1 false, i1 true>, <2 x i64> %src0)
ret <2 x i64>%res
@@ -137,7 +137,7 @@
;
; KNL-LABEL: test7:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
@@ -198,7 +198,7 @@
;
; KNL-LABEL: test10:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpslld $31, %xmm1, %xmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL-NEXT: kshiftlw $12, %k0, %k0
@@ -219,7 +219,7 @@
;
; KNL-LABEL: test11:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpsllq $63, %xmm1, %xmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k0
; KNL-NEXT: kshiftlw $14, %k0, %k0
@@ -240,7 +240,7 @@
;
; KNL-LABEL: test12:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpslld $31, %xmm1, %xmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL-NEXT: kshiftlw $12, %k0, %k0
@@ -262,14 +262,14 @@
;
; KNL-LABEL: test13:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0
; KNL-NEXT: kshiftlw $14, %k0, %k0
; KNL-NEXT: kshiftrw $14, %k0, %k1
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
%mask = icmp eq <2 x i32> %trigger, zeroinitializer
%res = call <2 x float> @llvm.masked.expandload.v2f32(float* %base, <2 x i1> %mask, <2 x float> %src0)
@@ -287,7 +287,7 @@
;
; KNL-LABEL: test14:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0
diff --git a/llvm/test/CodeGen/X86/conditional-tailcall-samedest.mir b/llvm/test/CodeGen/X86/conditional-tailcall-samedest.mir
index f975e6b..490109a 100644
--- a/llvm/test/CodeGen/X86/conditional-tailcall-samedest.mir
+++ b/llvm/test/CodeGen/X86/conditional-tailcall-samedest.mir
@@ -9,8 +9,8 @@
# CHECK: body: |
# CHECK: bb.0.entry:
# CHECK: successors: %bb.1(0x40000000)
-# CHECK: liveins: %edi
-# CHECK: CMP32ri8 killed %edi, 2, implicit-def %eflags
+# CHECK: liveins: $edi
+# CHECK: CMP32ri8 killed $edi, 2, implicit-def $eflags
# CHECK: TCRETURNdi64cc @mergeable_conditional_tailcall
# This was the unconditional branch to a dead MBB that we left behind before
@@ -78,7 +78,7 @@
tracksRegLiveness: true
registers:
liveins:
- - { reg: '%edi', virtual-reg: '' }
+ - { reg: '$edi', virtual-reg: '' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -102,36 +102,36 @@
body: |
bb.0.entry:
successors: %bb.2(0x40000000), %bb.1(0x40000000)
- liveins: %edi
+ liveins: $edi
- CMP32ri8 killed %edi, 2, implicit-def %eflags
- JB_1 %bb.2, implicit %eflags
+ CMP32ri8 killed $edi, 2, implicit-def $eflags
+ JB_1 %bb.2, implicit $eflags
JMP_1 %bb.1
bb.1.entry:
successors: %bb.4(0x40000000), %bb.5(0x40000000)
- liveins: %eflags
+ liveins: $eflags
- JE_1 %bb.4, implicit killed %eflags
+ JE_1 %bb.4, implicit killed $eflags
JMP_1 %bb.5
bb.2.sw.bb:
successors: %bb.3(0x00000800), %bb.6(0x7ffff800)
- %al = ACQUIRE_MOV8rm %rip, 1, %noreg, @static_local_guard, %noreg :: (volatile load acquire 1 from `i8* bitcast (i64* @static_local_guard to i8*)`, align 8)
- TEST8rr killed %al, %al, implicit-def %eflags
- JNE_1 %bb.6, implicit killed %eflags
+ $al = ACQUIRE_MOV8rm $rip, 1, $noreg, @static_local_guard, $noreg :: (volatile load acquire 1 from `i8* bitcast (i64* @static_local_guard to i8*)`, align 8)
+ TEST8rr killed $al, $al, implicit-def $eflags
+ JNE_1 %bb.6, implicit killed $eflags
JMP_1 %bb.3
bb.3.init.check.i:
- dead %edi = MOV32ri64 @static_local_guard, implicit-def %rdi
- TCRETURNdi64 @initialize_static_local, 0, csr_64, implicit %rsp, implicit %rdi
+ dead $edi = MOV32ri64 @static_local_guard, implicit-def $rdi
+ TCRETURNdi64 @initialize_static_local, 0, csr_64, implicit $rsp, implicit $rdi
bb.4.sw.bb2:
- TCRETURNdi64 @mergeable_conditional_tailcall, 0, csr_64, implicit %rsp
+ TCRETURNdi64 @mergeable_conditional_tailcall, 0, csr_64, implicit $rsp
bb.5.sw.epilog:
- TCRETURNdi64 @mergeable_conditional_tailcall, 0, csr_64, implicit %rsp
+ TCRETURNdi64 @mergeable_conditional_tailcall, 0, csr_64, implicit $rsp
bb.6.return:
RET 0
diff --git a/llvm/test/CodeGen/X86/critical-edge-split-2.ll b/llvm/test/CodeGen/X86/critical-edge-split-2.ll
index 4ebfddf..1371f84 100644
--- a/llvm/test/CodeGen/X86/critical-edge-split-2.ll
+++ b/llvm/test/CodeGen/X86/critical-edge-split-2.ll
@@ -25,7 +25,7 @@
; CHECK-NEXT: divl %esi
; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: .LBB0_2: # %cond.end.i
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
entry:
br i1 %C, label %cond.end.i, label %cond.false.i
diff --git a/llvm/test/CodeGen/X86/ctpop-combine.ll b/llvm/test/CodeGen/X86/ctpop-combine.ll
index 40dc6c4..2f32677 100644
--- a/llvm/test/CodeGen/X86/ctpop-combine.ll
+++ b/llvm/test/CodeGen/X86/ctpop-combine.ll
@@ -55,7 +55,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: andl $127, %edi
; CHECK-NEXT: popcntl %edi, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%x2 = and i8 %x, 127
%count = tail call i8 @llvm.ctpop.i8(i8 %x2)
diff --git a/llvm/test/CodeGen/X86/dagcombine-cse.ll b/llvm/test/CodeGen/X86/dagcombine-cse.ll
index 544407e..bf642c4 100644
--- a/llvm/test/CodeGen/X86/dagcombine-cse.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-cse.ll
@@ -19,8 +19,8 @@
;
; X64-LABEL: t:
; X64: ## %bb.0: ## %entry
-; X64-NEXT: ## kill: def %edx killed %edx def %rdx
-; X64-NEXT: ## kill: def %esi killed %esi def %rsi
+; X64-NEXT: ## kill: def $edx killed $edx def $rdx
+; X64-NEXT: ## kill: def $esi killed $esi def $rsi
; X64-NEXT: imull %ecx, %esi
; X64-NEXT: leal (%rsi,%rdx), %eax
; X64-NEXT: cltq
diff --git a/llvm/test/CodeGen/X86/divide-by-constant.ll b/llvm/test/CodeGen/X86/divide-by-constant.ll
index 5df6f16..ed6c0fa 100644
--- a/llvm/test/CodeGen/X86/divide-by-constant.ll
+++ b/llvm/test/CodeGen/X86/divide-by-constant.ll
@@ -8,14 +8,14 @@
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: imull $63551, %eax, %eax # imm = 0xF83F
; X32-NEXT: shrl $21, %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test1:
; X64: # %bb.0: # %entry
; X64-NEXT: imull $63551, %edi, %eax # imm = 0xF83F
; X64-NEXT: shrl $21, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
entry:
%div = udiv i16 %x, 33
@@ -28,14 +28,14 @@
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: imull $43691, %eax, %eax # imm = 0xAAAB
; X32-NEXT: shrl $17, %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test2:
; X64: # %bb.0: # %entry
; X64-NEXT: imull $43691, %esi, %eax # imm = 0xAAAB
; X64-NEXT: shrl $17, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
entry:
%div = udiv i16 %c, 3
@@ -49,14 +49,14 @@
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: imull $171, %eax, %eax
; X32-NEXT: shrl $9, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test3:
; X64: # %bb.0: # %entry
; X64-NEXT: imull $171, %esi, %eax
; X64-NEXT: shrl $9, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
entry:
%div = udiv i8 %c, 3
@@ -72,7 +72,7 @@
; X32-NEXT: shrl $31, %ecx
; X32-NEXT: shrl $16, %eax
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test4:
@@ -82,7 +82,7 @@
; X64-NEXT: shrl $31, %ecx
; X64-NEXT: shrl $16, %eax
; X64-NEXT: addl %ecx, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
entry:
%div = sdiv i16 %x, 33 ; <i32> [#uses=1]
@@ -103,7 +103,7 @@
; X64-NEXT: movl %edi, %eax
; X64-NEXT: imulq $365384439, %rax, %rax # imm = 0x15C752F7
; X64-NEXT: shrq $59, %rax
-; X64-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
; X64-NEXT: retq
%tmp1 = udiv i32 %A, 1577682821 ; <i32> [#uses=1]
ret i32 %tmp1
@@ -118,7 +118,7 @@
; X32-NEXT: shrl $31, %ecx
; X32-NEXT: sarl $18, %eax
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test6:
@@ -128,7 +128,7 @@
; X64-NEXT: shrl $31, %ecx
; X64-NEXT: sarl $18, %eax
; X64-NEXT: addl %ecx, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
entry:
%div = sdiv i16 %x, 10
@@ -147,11 +147,11 @@
;
; X64-LABEL: test7:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: shrl $2, %edi
; X64-NEXT: imulq $613566757, %rdi, %rax # imm = 0x24924925
; X64-NEXT: shrq $32, %rax
-; X64-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
; X64-NEXT: retq
%div = udiv i32 %x, 28
ret i32 %div
@@ -166,7 +166,7 @@
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: imull $211, %eax, %eax
; X32-NEXT: shrl $13, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test8:
@@ -175,7 +175,7 @@
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: imull $211, %eax, %eax
; X64-NEXT: shrl $13, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%div = udiv i8 %x, 78
ret i8 %div
@@ -189,7 +189,7 @@
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: imull $71, %eax, %eax
; X32-NEXT: shrl $11, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test9:
@@ -198,7 +198,7 @@
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: imull $71, %eax, %eax
; X64-NEXT: shrl $11, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%div = udiv i8 %x, 116
ret i8 %div
diff --git a/llvm/test/CodeGen/X86/divrem.ll b/llvm/test/CodeGen/X86/divrem.ll
index 99d5261..67acba0 100644
--- a/llvm/test/CodeGen/X86/divrem.ll
+++ b/llvm/test/CodeGen/X86/divrem.ll
@@ -262,7 +262,7 @@
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: def %eax killed %eax def %ax
+; X32-NEXT: # kill: def $eax killed $eax def $ax
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %ah, %ebx
; X32-NEXT: movb %al, (%edx)
@@ -273,7 +273,7 @@
; X64-LABEL: ui8:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: def %eax killed %eax def %ax
+; X64-NEXT: # kill: def $eax killed $eax def $ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %esi
; X64-NEXT: movb %al, (%rdx)
diff --git a/llvm/test/CodeGen/X86/divrem8_ext.ll b/llvm/test/CodeGen/X86/divrem8_ext.ll
index 6b729ee..313aa86 100644
--- a/llvm/test/CodeGen/X86/divrem8_ext.ll
+++ b/llvm/test/CodeGen/X86/divrem8_ext.ll
@@ -6,7 +6,7 @@
; X32-LABEL: test_udivrem_zext_ah:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: def %eax killed %eax def %ax
+; X32-NEXT: # kill: def $eax killed $eax def $ax
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %ah, %ecx
; X32-NEXT: movb %al, z
@@ -16,7 +16,7 @@
; X64-LABEL: test_udivrem_zext_ah:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: def %eax killed %eax def %ax
+; X64-NEXT: # kill: def $eax killed $eax def $ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %ecx
; X64-NEXT: movb %al, {{.*}}(%rip)
@@ -32,19 +32,19 @@
; X32-LABEL: test_urem_zext_ah:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: def %eax killed %eax def %ax
+; X32-NEXT: # kill: def $eax killed $eax def $ax
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %ah, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test_urem_zext_ah:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: def %eax killed %eax def %ax
+; X64-NEXT: # kill: def $eax killed $eax def $ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%1 = urem i8 %x, %y
ret i8 %1
@@ -55,21 +55,21 @@
; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: def %eax killed %eax def %ax
+; X32-NEXT: # kill: def $eax killed $eax def $ax
; X32-NEXT: divb %cl
; X32-NEXT: movzbl %ah, %eax
; X32-NEXT: addb %cl, %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test_urem_noext_ah:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: def %eax killed %eax def %ax
+; X64-NEXT: # kill: def $eax killed $eax def $ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %eax
; X64-NEXT: addb %sil, %al
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%1 = urem i8 %x, %y
%2 = add i8 %1, %y
@@ -80,7 +80,7 @@
; X32-LABEL: test_urem_zext64_ah:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: def %eax killed %eax def %ax
+; X32-NEXT: # kill: def $eax killed $eax def $ax
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %ah, %eax
; X32-NEXT: xorl %edx, %edx
@@ -89,7 +89,7 @@
; X64-LABEL: test_urem_zext64_ah:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: def %eax killed %eax def %ax
+; X64-NEXT: # kill: def $eax killed $eax def $ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %eax
; X64-NEXT: retq
@@ -131,7 +131,7 @@
; X32-NEXT: cbtw
; X32-NEXT: idivb {{[0-9]+}}(%esp)
; X32-NEXT: movsbl %ah, %eax
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test_srem_sext_ah:
@@ -140,7 +140,7 @@
; X64-NEXT: cbtw
; X64-NEXT: idivb %sil
; X64-NEXT: movsbl %ah, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%1 = srem i8 %x, %y
ret i8 %1
@@ -155,7 +155,7 @@
; X32-NEXT: idivb %cl
; X32-NEXT: movsbl %ah, %eax
; X32-NEXT: addb %cl, %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test_srem_noext_ah:
@@ -165,7 +165,7 @@
; X64-NEXT: idivb %sil
; X64-NEXT: movsbl %ah, %eax
; X64-NEXT: addb %sil, %al
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%1 = srem i8 %x, %y
%2 = add i8 %1, %y
@@ -200,7 +200,7 @@
; X32-LABEL: pr25754:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: def %eax killed %eax def %ax
+; X32-NEXT: # kill: def $eax killed $eax def $ax
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %ah, %ecx
; X32-NEXT: movzbl %al, %eax
@@ -211,7 +211,7 @@
; X64-LABEL: pr25754:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: def %eax killed %eax def %ax
+; X64-NEXT: # kill: def $eax killed $eax def $ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %ecx
; X64-NEXT: movzbl %al, %eax
diff --git a/llvm/test/CodeGen/X86/domain-reassignment.mir b/llvm/test/CodeGen/X86/domain-reassignment.mir
index 3cb4b5d..b000904 100644
--- a/llvm/test/CodeGen/X86/domain-reassignment.mir
+++ b/llvm/test/CodeGen/X86/domain-reassignment.mir
@@ -80,14 +80,14 @@
- { id: 21, class: fr128, preferred-register: '' }
- { id: 22, class: fr32x, preferred-register: '' }
liveins:
- - { reg: '%edi', virtual-reg: '%3' }
- - { reg: '%rsi', virtual-reg: '%4' }
- - { reg: '%xmm0', virtual-reg: '%5' }
- - { reg: '%xmm1', virtual-reg: '%6' }
- - { reg: '%xmm2', virtual-reg: '%7' }
- - { reg: '%xmm3', virtual-reg: '%8' }
- - { reg: '%xmm4', virtual-reg: '%9' }
- - { reg: '%xmm5', virtual-reg: '%10' }
+ - { reg: '$edi', virtual-reg: '%3' }
+ - { reg: '$rsi', virtual-reg: '%4' }
+ - { reg: '$xmm0', virtual-reg: '%5' }
+ - { reg: '$xmm1', virtual-reg: '%6' }
+ - { reg: '$xmm2', virtual-reg: '%7' }
+ - { reg: '$xmm3', virtual-reg: '%8' }
+ - { reg: '$xmm4', virtual-reg: '%9' }
+ - { reg: '$xmm5', virtual-reg: '%10' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -111,19 +111,19 @@
body: |
bb.0.entry:
successors: %bb.1(0x40000000), %bb.2(0x40000000)
- liveins: %edi, %rsi, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5
+ liveins: $edi, $rsi, $xmm0, $xmm1, $xmm2, $xmm3, $xmm4, $xmm5
- %10 = COPY %xmm5
- %9 = COPY %xmm4
- %8 = COPY %xmm3
- %7 = COPY %xmm2
- %6 = COPY %xmm1
- %5 = COPY %xmm0
- %4 = COPY %rsi
- %3 = COPY %edi
+ %10 = COPY $xmm5
+ %9 = COPY $xmm4
+ %8 = COPY $xmm3
+ %7 = COPY $xmm2
+ %6 = COPY $xmm1
+ %5 = COPY $xmm0
+ %4 = COPY $rsi
+ %3 = COPY $edi
%11 = COPY %3.sub_8bit
- TEST8ri killed %11, 1, implicit-def %eflags
- JE_1 %bb.2, implicit %eflags
+ TEST8ri killed %11, 1, implicit-def $eflags
+ JE_1 %bb.2, implicit $eflags
JMP_1 %bb.1
bb.1.if:
@@ -165,7 +165,7 @@
%21 = IMPLICIT_DEF
%20 = VMOVSSZrrk %19, killed %18, killed %21, %5
%22 = COPY %20
- VMOVSSZmr %4, 1, %noreg, 0, %noreg, killed %22 :: (store 4 into %ir.fptr)
+ VMOVSSZmr %4, 1, $noreg, 0, $noreg, killed %22 :: (store 4 into %ir.fptr)
RET 0
...
@@ -199,11 +199,11 @@
- { id: 17, class: gr8, preferred-register: '' }
- { id: 18, class: gr8, preferred-register: '' }
liveins:
- - { reg: '%rdi', virtual-reg: '%0' }
- - { reg: '%zmm0', virtual-reg: '%1' }
- - { reg: '%zmm1', virtual-reg: '%2' }
- - { reg: '%zmm2', virtual-reg: '%3' }
- - { reg: '%zmm3', virtual-reg: '%4' }
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
+ - { reg: '$zmm2', virtual-reg: '%3' }
+ - { reg: '$zmm3', virtual-reg: '%4' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -226,13 +226,13 @@
constants:
body: |
bb.0:
- liveins: %rdi, %zmm0, %zmm1, %zmm2, %zmm3
+ liveins: $rdi, $zmm0, $zmm1, $zmm2, $zmm3
- %0 = COPY %rdi
- %1 = COPY %zmm0
- %2 = COPY %zmm1
- %3 = COPY %zmm2
- %4 = COPY %zmm3
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
+ %3 = COPY $zmm2
+ %4 = COPY $zmm3
%5 = VCMPPDZrri %3, %4, 0
; CHECK: %6:vk32 = COPY %5
@@ -247,13 +247,13 @@
; CHECK: %16:vk8 = KANDBrr %15, %13
; CHECK: %17:vk8 = KXORBrr %16, %12
; CHECK: %18:vk8 = KADDBrr %17, %14
- %12 = SHR8ri %7, 2, implicit-def dead %eflags
- %13 = SHL8ri %12, 1, implicit-def dead %eflags
+ %12 = SHR8ri %7, 2, implicit-def dead $eflags
+ %13 = SHL8ri %12, 1, implicit-def dead $eflags
%14 = NOT8r %13
- %15 = OR8rr %14, %12, implicit-def dead %eflags
- %16 = AND8rr %15, %13, implicit-def dead %eflags
- %17 = XOR8rr %16, %12, implicit-def dead %eflags
- %18 = ADD8rr %17, %14, implicit-def dead %eflags
+ %15 = OR8rr %14, %12, implicit-def dead $eflags
+ %16 = AND8rr %15, %13, implicit-def dead $eflags
+ %17 = XOR8rr %16, %12, implicit-def dead $eflags
+ %18 = ADD8rr %17, %14, implicit-def dead $eflags
; CHECK: %9:vk32 = COPY %18
; CHECK: %10:vk8wm = COPY %9
@@ -261,11 +261,11 @@
%9 = INSERT_SUBREG %8, %18, 1
%10 = COPY %9
%11 = VMOVAPDZrrk %2, killed %10, %1
- VMOVAPDZmr %0, 1, %noreg, 0, %noreg, killed %11
+ VMOVAPDZmr %0, 1, $noreg, 0, $noreg, killed %11
- ; CHECK: KTESTBrr %18, %18, implicit-def %eflags
- TEST8rr %18, %18, implicit-def %eflags
- JE_1 %bb.1, implicit %eflags
+ ; CHECK: KTESTBrr %18, %18, implicit-def $eflags
+ TEST8rr %18, %18, implicit-def $eflags
+ JE_1 %bb.1, implicit $eflags
JMP_1 %bb.2
bb.1:
@@ -303,11 +303,11 @@
- { id: 16, class: gr16, preferred-register: '' }
- { id: 17, class: gr16, preferred-register: '' }
liveins:
- - { reg: '%rdi', virtual-reg: '%0' }
- - { reg: '%zmm0', virtual-reg: '%1' }
- - { reg: '%zmm1', virtual-reg: '%2' }
- - { reg: '%zmm2', virtual-reg: '%3' }
- - { reg: '%zmm3', virtual-reg: '%4' }
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
+ - { reg: '$zmm2', virtual-reg: '%3' }
+ - { reg: '$zmm3', virtual-reg: '%4' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -330,13 +330,13 @@
constants:
body: |
bb.0:
- liveins: %rdi, %zmm0, %zmm1, %zmm2, %zmm3
+ liveins: $rdi, $zmm0, $zmm1, $zmm2, $zmm3
- %0 = COPY %rdi
- %1 = COPY %zmm0
- %2 = COPY %zmm1
- %3 = COPY %zmm2
- %4 = COPY %zmm3
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
+ %3 = COPY $zmm2
+ %4 = COPY $zmm3
%5 = VCMPPSZrri %3, %4, 0
; CHECK: %6:vk32 = COPY %5
@@ -350,12 +350,12 @@
; CHECK: %15:vk16 = KORWrr %14, %12
; CHECK: %16:vk16 = KANDWrr %15, %13
; CHECK: %17:vk16 = KXORWrr %16, %12
- %12 = SHR16ri %7, 2, implicit-def dead %eflags
- %13 = SHL16ri %12, 1, implicit-def dead %eflags
+ %12 = SHR16ri %7, 2, implicit-def dead $eflags
+ %13 = SHL16ri %12, 1, implicit-def dead $eflags
%14 = NOT16r %13
- %15 = OR16rr %14, %12, implicit-def dead %eflags
- %16 = AND16rr %15, %13, implicit-def dead %eflags
- %17 = XOR16rr %16, %12, implicit-def dead %eflags
+ %15 = OR16rr %14, %12, implicit-def dead $eflags
+ %16 = AND16rr %15, %13, implicit-def dead $eflags
+ %17 = XOR16rr %16, %12, implicit-def dead $eflags
; CHECK: %9:vk32 = COPY %17
; CHECK: %10:vk16wm = COPY %9
@@ -363,11 +363,11 @@
%9 = INSERT_SUBREG %8, %17, 3
%10 = COPY %9
%11 = VMOVAPSZrrk %2, killed %10, %1
- VMOVAPSZmr %0, 1, %noreg, 0, %noreg, killed %11
+ VMOVAPSZmr %0, 1, $noreg, 0, $noreg, killed %11
- ; CHECK: KTESTWrr %17, %17, implicit-def %eflags
- TEST16rr %17, %17, implicit-def %eflags
- JE_1 %bb.1, implicit %eflags
+ ; CHECK: KTESTWrr %17, %17, implicit-def $eflags
+ TEST16rr %17, %17, implicit-def $eflags
+ JE_1 %bb.1, implicit $eflags
JMP_1 %bb.2
bb.1:
@@ -401,9 +401,9 @@
- { id: 12, class: gr32, preferred-register: '' }
- { id: 13, class: gr32, preferred-register: '' }
liveins:
- - { reg: '%rdi', virtual-reg: '%0' }
- - { reg: '%zmm0', virtual-reg: '%1' }
- - { reg: '%zmm1', virtual-reg: '%2' }
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -426,13 +426,13 @@
constants:
body: |
bb.0:
- liveins: %rdi, %zmm0, %zmm1
+ liveins: $rdi, $zmm0, $zmm1
- %0 = COPY %rdi
- %1 = COPY %zmm0
- %2 = COPY %zmm1
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
- ; CHECK: %5:vk32 = KMOVDkm %0, 1, %noreg, 0, %noreg
+ ; CHECK: %5:vk32 = KMOVDkm %0, 1, $noreg, 0, $noreg
; CHECK: %6:vk32 = KSHIFTRDri %5, 2
; CHECK: %7:vk32 = KSHIFTLDri %6, 1
; CHECK: %8:vk32 = KNOTDrr %7
@@ -441,24 +441,24 @@
; CHECK: %11:vk32 = KXORDrr %10, %6
; CHECK: %12:vk32 = KANDNDrr %11, %9
; CHECK: %13:vk32 = KADDDrr %12, %11
- %5 = MOV32rm %0, 1, %noreg, 0, %noreg
- %6 = SHR32ri %5, 2, implicit-def dead %eflags
- %7 = SHL32ri %6, 1, implicit-def dead %eflags
+ %5 = MOV32rm %0, 1, $noreg, 0, $noreg
+ %6 = SHR32ri %5, 2, implicit-def dead $eflags
+ %7 = SHL32ri %6, 1, implicit-def dead $eflags
%8 = NOT32r %7
- %9 = OR32rr %8, %6, implicit-def dead %eflags
- %10 = AND32rr %9, %7, implicit-def dead %eflags
- %11 = XOR32rr %10, %6, implicit-def dead %eflags
- %12 = ANDN32rr %11, %9, implicit-def dead %eflags
- %13 = ADD32rr %12, %11, implicit-def dead %eflags
+ %9 = OR32rr %8, %6, implicit-def dead $eflags
+ %10 = AND32rr %9, %7, implicit-def dead $eflags
+ %11 = XOR32rr %10, %6, implicit-def dead $eflags
+ %12 = ANDN32rr %11, %9, implicit-def dead $eflags
+ %13 = ADD32rr %12, %11, implicit-def dead $eflags
; CHECK: %3:vk32wm = COPY %13
%3 = COPY %13
%4 = VMOVDQU16Zrrk %2, killed %3, %1
- VMOVDQA32Zmr %0, 1, %noreg, 0, %noreg, killed %4
+ VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4
- ; CHECK: KTESTDrr %13, %13, implicit-def %eflags
- TEST32rr %13, %13, implicit-def %eflags
- JE_1 %bb.1, implicit %eflags
+ ; CHECK: KTESTDrr %13, %13, implicit-def $eflags
+ TEST32rr %13, %13, implicit-def $eflags
+ JE_1 %bb.1, implicit $eflags
JMP_1 %bb.2
bb.1:
@@ -492,9 +492,9 @@
- { id: 12, class: gr64, preferred-register: '' }
- { id: 13, class: gr64, preferred-register: '' }
liveins:
- - { reg: '%rdi', virtual-reg: '%0' }
- - { reg: '%zmm0', virtual-reg: '%1' }
- - { reg: '%zmm1', virtual-reg: '%2' }
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -517,13 +517,13 @@
constants:
body: |
bb.0:
- liveins: %rdi, %zmm0, %zmm1
+ liveins: $rdi, $zmm0, $zmm1
- %0 = COPY %rdi
- %1 = COPY %zmm0
- %2 = COPY %zmm1
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
- ; CHECK: %5:vk64 = KMOVQkm %0, 1, %noreg, 0, %noreg
+ ; CHECK: %5:vk64 = KMOVQkm %0, 1, $noreg, 0, $noreg
; CHECK: %6:vk64 = KSHIFTRQri %5, 2
; CHECK: %7:vk64 = KSHIFTLQri %6, 1
; CHECK: %8:vk64 = KNOTQrr %7
@@ -532,24 +532,24 @@
; CHECK: %11:vk64 = KXORQrr %10, %6
; CHECK: %12:vk64 = KANDNQrr %11, %9
; CHECK: %13:vk64 = KADDQrr %12, %11
- %5 = MOV64rm %0, 1, %noreg, 0, %noreg
- %6 = SHR64ri %5, 2, implicit-def dead %eflags
- %7 = SHL64ri %6, 1, implicit-def dead %eflags
+ %5 = MOV64rm %0, 1, $noreg, 0, $noreg
+ %6 = SHR64ri %5, 2, implicit-def dead $eflags
+ %7 = SHL64ri %6, 1, implicit-def dead $eflags
%8 = NOT64r %7
- %9 = OR64rr %8, %6, implicit-def dead %eflags
- %10 = AND64rr %9, %7, implicit-def dead %eflags
- %11 = XOR64rr %10, %6, implicit-def dead %eflags
- %12 = ANDN64rr %11, %9, implicit-def dead %eflags
- %13 = ADD64rr %12, %11, implicit-def dead %eflags
+ %9 = OR64rr %8, %6, implicit-def dead $eflags
+ %10 = AND64rr %9, %7, implicit-def dead $eflags
+ %11 = XOR64rr %10, %6, implicit-def dead $eflags
+ %12 = ANDN64rr %11, %9, implicit-def dead $eflags
+ %13 = ADD64rr %12, %11, implicit-def dead $eflags
; CHECK: %3:vk64wm = COPY %13
%3 = COPY %13
%4 = VMOVDQU8Zrrk %2, killed %3, %1
- VMOVDQA32Zmr %0, 1, %noreg, 0, %noreg, killed %4
+ VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4
- ; CHECK: KTESTQrr %13, %13, implicit-def %eflags
- TEST64rr %13, %13, implicit-def %eflags
- JE_1 %bb.1, implicit %eflags
+ ; CHECK: KTESTQrr %13, %13, implicit-def $eflags
+ TEST64rr %13, %13, implicit-def $eflags
+ JE_1 %bb.1, implicit $eflags
JMP_1 %bb.2
bb.1:
@@ -576,9 +576,9 @@
- { id: 5, class: gr16, preferred-register: '' }
- { id: 6, class: gr16, preferred-register: '' }
liveins:
- - { reg: '%rdi', virtual-reg: '%0' }
- - { reg: '%zmm0', virtual-reg: '%1' }
- - { reg: '%zmm1', virtual-reg: '%2' }
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -601,22 +601,22 @@
constants:
body: |
bb.0:
- liveins: %rdi, %zmm0, %zmm1
+ liveins: $rdi, $zmm0, $zmm1
- %0 = COPY %rdi
- %1 = COPY %zmm0
- %2 = COPY %zmm1
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
- ; CHECK: %7:vk8 = KMOVBkm %0, 1, %noreg, 0, %noreg
+ ; CHECK: %7:vk8 = KMOVBkm %0, 1, $noreg, 0, $noreg
; CHECK: %5:vk16 = COPY %7
; CHECK: %6:vk16 = KNOTWrr %5
- %5 = MOVZX16rm8 %0, 1, %noreg, 0, %noreg
+ %5 = MOVZX16rm8 %0, 1, $noreg, 0, $noreg
%6 = NOT16r %5
; CHECK: %3:vk16wm = COPY %6
%3 = COPY %6
%4 = VMOVAPSZrrk %2, killed %3, %1
- VMOVAPSZmr %0, 1, %noreg, 0, %noreg, killed %4
+ VMOVAPSZmr %0, 1, $noreg, 0, $noreg, killed %4
RET 0
...
@@ -639,9 +639,9 @@
- { id: 6, class: gr32, preferred-register: '' }
- { id: 7, class: gr32, preferred-register: '' }
liveins:
- - { reg: '%rdi', virtual-reg: '%0' }
- - { reg: '%zmm0', virtual-reg: '%1' }
- - { reg: '%zmm1', virtual-reg: '%2' }
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -664,25 +664,25 @@
constants:
body: |
bb.0:
- liveins: %rdi, %zmm0, %zmm1
+ liveins: $rdi, $zmm0, $zmm1
- %0 = COPY %rdi
- %1 = COPY %zmm0
- %2 = COPY %zmm1
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
- ; CHECK: %8:vk8 = KMOVBkm %0, 1, %noreg, 0, %noreg
+ ; CHECK: %8:vk8 = KMOVBkm %0, 1, $noreg, 0, $noreg
; CHECK: %5:vk32 = COPY %8
- ; CHECK: %9:vk16 = KMOVWkm %0, 1, %noreg, 0, %noreg
+ ; CHECK: %9:vk16 = KMOVWkm %0, 1, $noreg, 0, $noreg
; CHECK: %6:vk32 = COPY %9
; CHECK: %7:vk32 = KADDDrr %5, %6
- %5 = MOVZX32rm8 %0, 1, %noreg, 0, %noreg
- %6 = MOVZX32rm16 %0, 1, %noreg, 0, %noreg
- %7 = ADD32rr %5, %6, implicit-def dead %eflags
+ %5 = MOVZX32rm8 %0, 1, $noreg, 0, $noreg
+ %6 = MOVZX32rm16 %0, 1, $noreg, 0, $noreg
+ %7 = ADD32rr %5, %6, implicit-def dead $eflags
; CHECK: %3:vk64wm = COPY %7
%3 = COPY %7
%4 = VMOVDQU16Zrrk %2, killed %3, %1
- VMOVDQA32Zmr %0, 1, %noreg, 0, %noreg, killed %4
+ VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4
RET 0
...
@@ -705,9 +705,9 @@
- { id: 6, class: gr64, preferred-register: '' }
- { id: 7, class: gr64, preferred-register: '' }
liveins:
- - { reg: '%rdi', virtual-reg: '%0' }
- - { reg: '%zmm0', virtual-reg: '%1' }
- - { reg: '%zmm1', virtual-reg: '%2' }
+ - { reg: '$rdi', virtual-reg: '%0' }
+ - { reg: '$zmm0', virtual-reg: '%1' }
+ - { reg: '$zmm1', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -730,25 +730,25 @@
constants:
body: |
bb.0:
- liveins: %rdi, %zmm0, %zmm1
+ liveins: $rdi, $zmm0, $zmm1
- %0 = COPY %rdi
- %1 = COPY %zmm0
- %2 = COPY %zmm1
+ %0 = COPY $rdi
+ %1 = COPY $zmm0
+ %2 = COPY $zmm1
- ; CHECK: %8:vk8 = KMOVBkm %0, 1, %noreg, 0, %noreg
+ ; CHECK: %8:vk8 = KMOVBkm %0, 1, $noreg, 0, $noreg
; CHECK: %5:vk64 = COPY %8
- ; CHECK: %9:vk16 = KMOVWkm %0, 1, %noreg, 0, %noreg
+ ; CHECK: %9:vk16 = KMOVWkm %0, 1, $noreg, 0, $noreg
; CHECK: %6:vk64 = COPY %9
; CHECK: %7:vk64 = KADDQrr %5, %6
- %5 = MOVZX64rm8 %0, 1, %noreg, 0, %noreg
- %6 = MOVZX64rm16 %0, 1, %noreg, 0, %noreg
- %7 = ADD64rr %5, %6, implicit-def dead %eflags
+ %5 = MOVZX64rm8 %0, 1, $noreg, 0, $noreg
+ %6 = MOVZX64rm16 %0, 1, $noreg, 0, $noreg
+ %7 = ADD64rr %5, %6, implicit-def dead $eflags
; CHECK: %3:vk64wm = COPY %7
%3 = COPY %7
%4 = VMOVDQU8Zrrk %2, killed %3, %1
- VMOVDQA32Zmr %0, 1, %noreg, 0, %noreg, killed %4
+ VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4
RET 0
...
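The hunks above show the invariant the re-sigiling preserves: physical
registers (and $noreg) flip from '%' to '$', while numbered virtual
registers (%0, %22), block references (%bb.1), and IR names (%ir.fptr)
keep '%'. Below is a minimal Python sketch of such a rewrite, assuming
the exclusion set shown; it is illustrative only and is not the script
used to produce this patch.

import re

# '%' followed by an identifier that starts with a letter: %rdi, %eflags,
# %noreg. Numbered vregs (%0, %22) start with a digit and never match.
PHYSREG = re.compile(r'%([A-Za-z][A-Za-z0-9_]*)')

# MIR constructs that share the '%' sigil but are not registers; they are
# always followed by '.', e.g. %bb.1, %ir.fptr, %stack.0. (Assumed set.)
NOT_A_REG = {'bb', 'ir', 'stack', 'const', 'subreg'}

def resigil(line):
    """Rewrite physical-register sigils on one MIR/CHECK line."""
    def repl(m):
        name = m.group(1)
        if name in NOT_A_REG:
            return m.group(0)  # leave %bb.1, %ir.fptr, etc. alone
        return '$' + name
    return PHYSREG.sub(repl, line)

For example, resigil("VMOVSSZmr %4, 1, %noreg, 0, %noreg, killed %22")
yields "VMOVSSZmr %4, 1, $noreg, 0, $noreg, killed %22", matching the
first hunk of this section.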
diff --git a/llvm/test/CodeGen/X86/dynamic-regmask.ll b/llvm/test/CodeGen/X86/dynamic-regmask.ll
index 7576aed..01a87ea 100644
--- a/llvm/test/CodeGen/X86/dynamic-regmask.ll
+++ b/llvm/test/CodeGen/X86/dynamic-regmask.ll
@@ -11,8 +11,8 @@
ret i32 %b2
}
; CHECK: name: caller
-; CHECK: CALL64pcrel32 @callee, CustomRegMask(%bh,%bl,%bp,%bpl,%bx,%ebp,%ebx,%esp,%rbp,%rbx,%rsp,%sp,%spl,%r10,%r11,%r12,%r13,%r14,%r15,%xmm8,%xmm9,%xmm10,%xmm11,%xmm12,%xmm13,%xmm14,%xmm15,%r10b,%r11b,%r12b,%r13b,%r14b,%r15b,%r10d,%r11d,%r12d,%r13d,%r14d,%r15d,%r10w,%r11w,%r12w,%r13w,%r14w,%r15w)
-; CHECK: RET 0, %eax
+; CHECK: CALL64pcrel32 @callee, CustomRegMask($bh,$bl,$bp,$bpl,$bx,$ebp,$ebx,$esp,$rbp,$rbx,$rsp,$sp,$spl,$r10,$r11,$r12,$r13,$r14,$r15,$xmm8,$xmm9,$xmm10,$xmm11,$xmm12,$xmm13,$xmm14,$xmm15,$r10b,$r11b,$r12b,$r13b,$r14b,$r15b,$r10d,$r11d,$r12d,$r13d,$r14d,$r15d,$r10w,$r11w,$r12w,$r13w,$r14w,$r15w)
+; CHECK: RET 0, $eax
define x86_regcallcc {i32, i32, i32} @test_callee(i32 %a0, i32 %b0, i32 %c0, i32 %d0, i32 %e0) nounwind {
%b1 = mul i32 7, %e0
@@ -24,7 +24,7 @@
ret {i32, i32, i32} %b6
}
; CHECK: name: test_callee
-; CHECK: calleeSavedRegisters: [ '%rbx', '%rbp', '%rsp', '%r10', '%r11', '%r12',
-; CHECK: '%r13', '%r14', '%r15', '%xmm8', '%xmm9', '%xmm10',
-; CHECK: '%xmm11', '%xmm12', '%xmm13', '%xmm14', '%xmm15' ]
-; CHECK: RET 0, %eax, %ecx, %edx
+; CHECK: calleeSavedRegisters: [ '$rbx', '$rbp', '$rsp', '$r10', '$r11', '$r12',
+; CHECK: '$r13', '$r14', '$r15', '$xmm8', '$xmm9', '$xmm10',
+; CHECK: '$xmm11', '$xmm12', '$xmm13', '$xmm14', '$xmm15' ]
+; CHECK: RET 0, $eax, $ecx, $edx
diff --git a/llvm/test/CodeGen/X86/eflags-copy-expansion.mir b/llvm/test/CodeGen/X86/eflags-copy-expansion.mir
index 11d4c81..385e3d9 100644
--- a/llvm/test/CodeGen/X86/eflags-copy-expansion.mir
+++ b/llvm/test/CodeGen/X86/eflags-copy-expansion.mir
@@ -21,11 +21,11 @@
name: foo
tracksRegLiveness: true
liveins:
- - { reg: '%edi' }
+ - { reg: '$edi' }
body: |
bb.0.entry:
- liveins: %edi
- NOOP implicit-def %al
+ liveins: $edi
+ NOOP implicit-def $al
; The bug was triggered only when LivePhysReg is used, which
; happens only when the heuristic for the liveness computation
@@ -46,19 +46,19 @@
NOOP
NOOP
; Save AL.
- ; CHECK: PUSH32r killed %eax
+ ; CHECK: PUSH32r killed $eax
; Copy edi into EFLAGS
- ; CHECK-NEXT: %eax = MOV32rr %edi
- ; CHECK-NEXT: %al = ADD8ri %al, 127, implicit-def %eflags
- ; CHECK-NEXT: SAHF implicit-def %eflags, implicit %ah
- %eflags = COPY %edi
+ ; CHECK-NEXT: $eax = MOV32rr $edi
+ ; CHECK-NEXT: $al = ADD8ri $al, 127, implicit-def $eflags
+ ; CHECK-NEXT: SAHF implicit-def $eflags, implicit $ah
+ $eflags = COPY $edi
; Restore AL.
- ; CHECK-NEXT: %eax = POP32r
+ ; CHECK-NEXT: $eax = POP32r
bb.1.false:
- liveins: %al
- NOOP implicit %al
+ liveins: $al
+ NOOP implicit $al
RETQ
...
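In a file like evex-to-vex-compress.mir below, nearly every line names a
physical register, so the input MIR and its '; CHECK:' expectations must
flip sigils in lockstep. A cheap post-rewrite invariant check, again a
hedged sketch with an assumed helper name rather than anything taken
from this patch, is that no numbered vreg picked up the '$' sigil:

import re

def assert_vregs_untouched(text):
    # Virtual registers must keep '%'; a '$' directly followed by a digit
    # would mean the rewrite re-sigiled one by mistake.
    bad = re.findall(r'\$\d+', text)
    assert not bad, 'vregs wrongly re-sigiled: %r' % bad[:5]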
diff --git a/llvm/test/CodeGen/X86/evex-to-vex-compress.mir b/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
index 5b593b3..9937ca0 100755
--- a/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
+++ b/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
@@ -17,880 +17,880 @@
name: evex_z256_to_vex_test
body: |
bb.0:
- ; CHECK: VMOVAPDYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
- VMOVAPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
- ; CHECK: %ymm0 = VMOVAPDYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMOVAPDZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMOVAPDYrr %ymm0
- %ymm0 = VMOVAPDZ256rr %ymm0
- ; CHECK: %ymm0 = VMOVAPDYrr_REV %ymm0
- %ymm0 = VMOVAPDZ256rr_REV %ymm0
- ; CHECK: VMOVAPSYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
- VMOVAPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
- ; CHECK: %ymm0 = VMOVAPSYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMOVAPSZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMOVAPSYrr %ymm0
- %ymm0 = VMOVAPSZ256rr %ymm0
- ; CHECK: %ymm0 = VMOVAPSYrr_REV %ymm0
- %ymm0 = VMOVAPSZ256rr_REV %ymm0
- ; CHECK: %ymm0 = VMOVDDUPYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMOVDDUPZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMOVDDUPYrr %ymm0
- %ymm0 = VMOVDDUPZ256rr %ymm0
- ; CHECK: VMOVDQAYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
- VMOVDQA32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
- ; CHECK: %ymm0 = VMOVDQAYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMOVDQA32Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMOVDQAYrr %ymm0
- %ymm0 = VMOVDQA32Z256rr %ymm0
- ; CHECK: %ymm0 = VMOVDQAYrr_REV %ymm0
- %ymm0 = VMOVDQA32Z256rr_REV %ymm0
- ; CHECK: VMOVDQAYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
- VMOVDQA64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
- ; CHECK: %ymm0 = VMOVDQAYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMOVDQA64Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMOVDQAYrr %ymm0
- %ymm0 = VMOVDQA64Z256rr %ymm0
- ; CHECK: %ymm0 = VMOVDQAYrr_REV %ymm0
- %ymm0 = VMOVDQA64Z256rr_REV %ymm0
- ; CHECK: VMOVDQUYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
- VMOVDQU16Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
- ; CHECK: %ymm0 = VMOVDQUYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMOVDQU16Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMOVDQUYrr %ymm0
- %ymm0 = VMOVDQU16Z256rr %ymm0
- ; CHECK: %ymm0 = VMOVDQUYrr_REV %ymm0
- %ymm0 = VMOVDQU16Z256rr_REV %ymm0
- ; CHECK: VMOVDQUYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
- VMOVDQU32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
- ; CHECK: %ymm0 = VMOVDQUYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMOVDQU32Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMOVDQUYrr %ymm0
- %ymm0 = VMOVDQU32Z256rr %ymm0
- ; CHECK: %ymm0 = VMOVDQUYrr_REV %ymm0
- %ymm0 = VMOVDQU32Z256rr_REV %ymm0
- ; CHECK: VMOVDQUYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
- VMOVDQU64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
- ; CHECK: %ymm0 = VMOVDQUYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMOVDQU64Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMOVDQUYrr %ymm0
- %ymm0 = VMOVDQU64Z256rr %ymm0
- ; CHECK: %ymm0 = VMOVDQUYrr_REV %ymm0
- %ymm0 = VMOVDQU64Z256rr_REV %ymm0
- ; CHECK: VMOVDQUYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
- VMOVDQU8Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
- ; CHECK: %ymm0 = VMOVDQUYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMOVDQU8Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMOVDQUYrr %ymm0
- %ymm0 = VMOVDQU8Z256rr %ymm0
- ; CHECK: %ymm0 = VMOVDQUYrr_REV %ymm0
- %ymm0 = VMOVDQU8Z256rr_REV %ymm0
- ; CHECK: %ymm0 = VMOVNTDQAYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMOVNTDQAZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: VMOVNTDQYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
- VMOVNTDQZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
- ; CHECK: VMOVNTPDYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
- VMOVNTPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
- ; CHECK: VMOVNTPSYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
- VMOVNTPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
- ; CHECK: %ymm0 = VMOVSHDUPYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMOVSHDUPZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMOVSHDUPYrr %ymm0
- %ymm0 = VMOVSHDUPZ256rr %ymm0
- ; CHECK: %ymm0 = VMOVSLDUPYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMOVSLDUPZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMOVSLDUPYrr %ymm0
- %ymm0 = VMOVSLDUPZ256rr %ymm0
- ; CHECK: VMOVUPDYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
- VMOVUPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
- ; CHECK: %ymm0 = VMOVUPDYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMOVUPDZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMOVUPDYrr %ymm0
- %ymm0 = VMOVUPDZ256rr %ymm0
- ; CHECK: %ymm0 = VMOVUPDYrr_REV %ymm0
- %ymm0 = VMOVUPDZ256rr_REV %ymm0
- ; CHECK: VMOVUPSYmr %rdi, 1, %noreg, 0, %noreg, %ymm0
- VMOVUPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm0
- ; CHECK: %ymm0 = VPANDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPANDDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPANDYrr %ymm0, %ymm1
- %ymm0 = VPANDDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPANDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPANDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPANDYrr %ymm0, %ymm1
- %ymm0 = VPANDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPANDNYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPANDNDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPANDNYrr %ymm0, %ymm1
- %ymm0 = VPANDNDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPANDNYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPANDNQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPANDNYrr %ymm0, %ymm1
- %ymm0 = VPANDNQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPAVGBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPAVGBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPAVGBYrr %ymm0, %ymm1
- %ymm0 = VPAVGBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPAVGWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPAVGWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPAVGWYrr %ymm0, %ymm1
- %ymm0 = VPAVGWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPADDBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPADDBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPADDBYrr %ymm0, %ymm1
- %ymm0 = VPADDBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPADDDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPADDDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPADDDYrr %ymm0, %ymm1
- %ymm0 = VPADDDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPADDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPADDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPADDQYrr %ymm0, %ymm1
- %ymm0 = VPADDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPADDSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPADDSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPADDSBYrr %ymm0, %ymm1
- %ymm0 = VPADDSBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPADDSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPADDSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPADDSWYrr %ymm0, %ymm1
- %ymm0 = VPADDSWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPADDUSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPADDUSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPADDUSBYrr %ymm0, %ymm1
- %ymm0 = VPADDUSBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPADDUSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPADDUSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPADDUSWYrr %ymm0, %ymm1
- %ymm0 = VPADDUSWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPADDWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPADDWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPADDWYrr %ymm0, %ymm1
- %ymm0 = VPADDWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMULPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMULPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMULPDYrr %ymm0, %ymm1
- %ymm0 = VMULPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMULPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMULPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMULPSYrr %ymm0, %ymm1
- %ymm0 = VMULPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VORPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VORPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VORPDYrr %ymm0, %ymm1
- %ymm0 = VORPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VORPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VORPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VORPSYrr %ymm0, %ymm1
- %ymm0 = VORPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMADDUBSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMADDUBSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMADDUBSWYrr %ymm0, %ymm1
- %ymm0 = VPMADDUBSWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMADDWDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMADDWDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMADDWDYrr %ymm0, %ymm1
- %ymm0 = VPMADDWDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMAXSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMAXSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMAXSBYrr %ymm0, %ymm1
- %ymm0 = VPMAXSBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMAXSDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMAXSDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMAXSDYrr %ymm0, %ymm1
- %ymm0 = VPMAXSDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMAXSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMAXSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMAXSWYrr %ymm0, %ymm1
- %ymm0 = VPMAXSWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMAXUBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMAXUBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMAXUBYrr %ymm0, %ymm1
- %ymm0 = VPMAXUBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMAXUDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMAXUDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMAXUDYrr %ymm0, %ymm1
- %ymm0 = VPMAXUDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMAXUWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMAXUWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMAXUWYrr %ymm0, %ymm1
- %ymm0 = VPMAXUWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMINSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMINSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMINSBYrr %ymm0, %ymm1
- %ymm0 = VPMINSBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMINSDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMINSDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMINSDYrr %ymm0, %ymm1
- %ymm0 = VPMINSDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMINSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMINSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMINSWYrr %ymm0, %ymm1
- %ymm0 = VPMINSWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMINUBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMINUBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMINUBYrr %ymm0, %ymm1
- %ymm0 = VPMINUBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMINUDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMINUDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMINUDYrr %ymm0, %ymm1
- %ymm0 = VPMINUDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMINUWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMINUWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMINUWYrr %ymm0, %ymm1
- %ymm0 = VPMINUWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMULDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMULDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMULDQYrr %ymm0, %ymm1
- %ymm0 = VPMULDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMULHRSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMULHRSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMULHRSWYrr %ymm0, %ymm1
- %ymm0 = VPMULHRSWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMULHUWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMULHUWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMULHUWYrr %ymm0, %ymm1
- %ymm0 = VPMULHUWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMULHWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMULHWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMULHWYrr %ymm0, %ymm1
- %ymm0 = VPMULHWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMULLDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMULLDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMULLDYrr %ymm0, %ymm1
- %ymm0 = VPMULLDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMULLWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMULLWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMULLWYrr %ymm0, %ymm1
- %ymm0 = VPMULLWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPMULUDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMULUDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMULUDQYrr %ymm0, %ymm1
- %ymm0 = VPMULUDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPORYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPORDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPORYrr %ymm0, %ymm1
- %ymm0 = VPORDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPORYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPORQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPORYrr %ymm0, %ymm1
- %ymm0 = VPORQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSUBBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSUBBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSUBBYrr %ymm0, %ymm1
- %ymm0 = VPSUBBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSUBDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSUBDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSUBDYrr %ymm0, %ymm1
- %ymm0 = VPSUBDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSUBQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSUBQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSUBQYrr %ymm0, %ymm1
- %ymm0 = VPSUBQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSUBSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSUBSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSUBSBYrr %ymm0, %ymm1
- %ymm0 = VPSUBSBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSUBSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSUBSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSUBSWYrr %ymm0, %ymm1
- %ymm0 = VPSUBSWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSUBUSBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSUBUSBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSUBUSBYrr %ymm0, %ymm1
- %ymm0 = VPSUBUSBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSUBUSWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSUBUSWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSUBUSWYrr %ymm0, %ymm1
- %ymm0 = VPSUBUSWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSUBWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSUBWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSUBWYrr %ymm0, %ymm1
- %ymm0 = VPSUBWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPXORYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPXORDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPXORYrr %ymm0, %ymm1
- %ymm0 = VPXORDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPXORYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPXORQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPXORYrr %ymm0, %ymm1
- %ymm0 = VPXORQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VADDPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VADDPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VADDPDYrr %ymm0, %ymm1
- %ymm0 = VADDPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VADDPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VADDPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VADDPSYrr %ymm0, %ymm1
- %ymm0 = VADDPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VANDNPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VANDNPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VANDNPDYrr %ymm0, %ymm1
- %ymm0 = VANDNPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VANDNPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VANDNPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VANDNPSYrr %ymm0, %ymm1
- %ymm0 = VANDNPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VANDPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VANDPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VANDPDYrr %ymm0, %ymm1
- %ymm0 = VANDPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VANDPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VANDPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VANDPSYrr %ymm0, %ymm1
- %ymm0 = VANDPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VDIVPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VDIVPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VDIVPDYrr %ymm0, %ymm1
- %ymm0 = VDIVPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VDIVPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VDIVPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VDIVPSYrr %ymm0, %ymm1
- %ymm0 = VDIVPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMAXCPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMAXCPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMAXCPDYrr %ymm0, %ymm1
- %ymm0 = VMAXCPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMAXCPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMAXCPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMAXCPSYrr %ymm0, %ymm1
- %ymm0 = VMAXCPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMAXCPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMAXPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMAXCPDYrr %ymm0, %ymm1
- %ymm0 = VMAXPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMAXCPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMAXPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMAXCPSYrr %ymm0, %ymm1
- %ymm0 = VMAXPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMINCPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMINCPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMINCPDYrr %ymm0, %ymm1
- %ymm0 = VMINCPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMINCPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMINCPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMINCPSYrr %ymm0, %ymm1
- %ymm0 = VMINCPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMINCPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMINPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMINCPDYrr %ymm0, %ymm1
- %ymm0 = VMINPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMINCPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VMINPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VMINCPSYrr %ymm0, %ymm1
- %ymm0 = VMINPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VXORPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VXORPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VXORPDYrr %ymm0, %ymm1
- %ymm0 = VXORPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VXORPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VXORPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VXORPSYrr %ymm0, %ymm1
- %ymm0 = VXORPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPACKSSDWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPACKSSDWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPACKSSDWYrr %ymm0, %ymm1
- %ymm0 = VPACKSSDWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPACKSSWBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPACKSSWBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPACKSSWBYrr %ymm0, %ymm1
- %ymm0 = VPACKSSWBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPACKUSDWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPACKUSDWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPACKUSDWYrr %ymm0, %ymm1
- %ymm0 = VPACKUSDWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPACKUSWBYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPACKUSWBZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPACKUSWBYrr %ymm0, %ymm1
- %ymm0 = VPACKUSWBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VUNPCKHPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VUNPCKHPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VUNPCKHPDYrr %ymm0, %ymm1
- %ymm0 = VUNPCKHPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VUNPCKHPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VUNPCKHPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VUNPCKHPSYrr %ymm0, %ymm1
- %ymm0 = VUNPCKHPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VUNPCKLPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VUNPCKLPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VUNPCKLPDYrr %ymm0, %ymm1
- %ymm0 = VUNPCKLPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VUNPCKLPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VUNPCKLPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VUNPCKLPSYrr %ymm0, %ymm1
- %ymm0 = VUNPCKLPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VSUBPDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VSUBPDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VSUBPDYrr %ymm0, %ymm1
- %ymm0 = VSUBPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VSUBPSYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VSUBPSZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VSUBPSYrr %ymm0, %ymm1
- %ymm0 = VSUBPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPUNPCKHBWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPUNPCKHBWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPUNPCKHBWYrr %ymm0, %ymm1
- %ymm0 = VPUNPCKHBWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPUNPCKHDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPUNPCKHDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPUNPCKHDQYrr %ymm0, %ymm1
- %ymm0 = VPUNPCKHDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPUNPCKHQDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPUNPCKHQDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPUNPCKHQDQYrr %ymm0, %ymm1
- %ymm0 = VPUNPCKHQDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPUNPCKHWDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPUNPCKHWDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPUNPCKHWDYrr %ymm0, %ymm1
- %ymm0 = VPUNPCKHWDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPUNPCKLBWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPUNPCKLBWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPUNPCKLBWYrr %ymm0, %ymm1
- %ymm0 = VPUNPCKLBWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPUNPCKLDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPUNPCKLDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPUNPCKLDQYrr %ymm0, %ymm1
- %ymm0 = VPUNPCKLDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPUNPCKLQDQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPUNPCKLQDQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPUNPCKLQDQYrr %ymm0, %ymm1
- %ymm0 = VPUNPCKLQDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPUNPCKLWDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPUNPCKLWDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPUNPCKLWDYrr %ymm0, %ymm1
- %ymm0 = VPUNPCKLWDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VFMADD132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMADD132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMADD132PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMADD132PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADD132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMADD132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMADD132PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMADD132PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADD213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMADD213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMADD213PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMADD213PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADD213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMADD213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMADD213PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMADD213PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADD231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMADD231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMADD231PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMADD231PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADD231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMADD231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMADD231PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMADD231PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADDSUB132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMADDSUB132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMADDSUB132PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMADDSUB132PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADDSUB132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMADDSUB132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMADDSUB132PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMADDSUB132PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADDSUB213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMADDSUB213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMADDSUB213PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMADDSUB213PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADDSUB213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMADDSUB213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMADDSUB213PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMADDSUB213PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADDSUB231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMADDSUB231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMADDSUB231PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMADDSUB231PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMADDSUB231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMADDSUB231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMADDSUB231PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMADDSUB231PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUB132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMSUB132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMSUB132PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMSUB132PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUB132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMSUB132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMSUB132PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMSUB132PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUB213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMSUB213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMSUB213PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMSUB213PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUB213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMSUB213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMSUB213PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMSUB213PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUB231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMSUB231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMSUB231PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMSUB231PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUB231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMSUB231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMSUB231PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMSUB231PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUBADD132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMSUBADD132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMSUBADD132PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMSUBADD132PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUBADD132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMSUBADD132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMSUBADD132PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMSUBADD132PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUBADD213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMSUBADD213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMSUBADD213PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMSUBADD213PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUBADD213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMSUBADD213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMSUBADD213PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMSUBADD213PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUBADD231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMSUBADD231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMSUBADD231PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMSUBADD231PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFMSUBADD231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFMSUBADD231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFMSUBADD231PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFMSUBADD231PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMADD132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFNMADD132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFNMADD132PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFNMADD132PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMADD132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFNMADD132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFNMADD132PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFNMADD132PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMADD213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFNMADD213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFNMADD213PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFNMADD213PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMADD213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFNMADD213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFNMADD213PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFNMADD213PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMADD231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFNMADD231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFNMADD231PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFNMADD231PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMADD231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFNMADD231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFNMADD231PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFNMADD231PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMSUB132PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFNMSUB132PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFNMSUB132PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFNMSUB132PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMSUB132PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFNMSUB132PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFNMSUB132PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFNMSUB132PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMSUB213PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFNMSUB213PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFNMSUB213PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFNMSUB213PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMSUB213PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFNMSUB213PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFNMSUB213PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFNMSUB213PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMSUB231PDYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFNMSUB231PDZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFNMSUB231PDYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFNMSUB231PDZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VFNMSUB231PSYm %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- %ymm0 = VFNMSUB231PSZ256m %ymm0, %ymm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VFNMSUB231PSYr %ymm0, %ymm1, %ymm2
- %ymm0 = VFNMSUB231PSZ256r %ymm0, %ymm1, %ymm2
- ; CHECK: %ymm0 = VPSRADYri %ymm0, 7
- %ymm0 = VPSRADZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSRADYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSRADZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSRADYrr %ymm0, %xmm1
- %ymm0 = VPSRADZ256rr %ymm0, %xmm1
- ; CHECK: %ymm0 = VPSRAVDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSRAVDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSRAVDYrr %ymm0, %ymm1
- %ymm0 = VPSRAVDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSRAWYri %ymm0, 7
- %ymm0 = VPSRAWZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSRAWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSRAWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSRAWYrr %ymm0, %xmm1
- %ymm0 = VPSRAWZ256rr %ymm0, %xmm1
- ; CHECK: %ymm0 = VPSRLDQYri %ymm0, %ymm1
- %ymm0 = VPSRLDQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSRLDYri %ymm0, 7
- %ymm0 = VPSRLDZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSRLDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSRLDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSRLDYrr %ymm0, %xmm1
- %ymm0 = VPSRLDZ256rr %ymm0, %xmm1
- ; CHECK: %ymm0 = VPSRLQYri %ymm0, 7
- %ymm0 = VPSRLQZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSRLQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSRLQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSRLQYrr %ymm0, %xmm1
- %ymm0 = VPSRLQZ256rr %ymm0, %xmm1
- ; CHECK: %ymm0 = VPSRLVDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSRLVDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSRLVDYrr %ymm0, %ymm1
- %ymm0 = VPSRLVDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSRLVQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSRLVQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSRLVQYrr %ymm0, %ymm1
- %ymm0 = VPSRLVQZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSRLWYri %ymm0, 7
- %ymm0 = VPSRLWZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSRLWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSRLWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSRLWYrr %ymm0, %xmm1
- %ymm0 = VPSRLWZ256rr %ymm0, %xmm1
- ; CHECK: %ymm0 = VPMOVSXBDYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMOVSXBDZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMOVSXBDYrr %xmm0
- %ymm0 = VPMOVSXBDZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVSXBQYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMOVSXBQZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMOVSXBQYrr %xmm0
- %ymm0 = VPMOVSXBQZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVSXBWYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMOVSXBWZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMOVSXBWYrr %xmm0
- %ymm0 = VPMOVSXBWZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVSXDQYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMOVSXDQZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMOVSXDQYrr %xmm0
- %ymm0 = VPMOVSXDQZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVSXWDYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMOVSXWDZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMOVSXWDYrr %xmm0
- %ymm0 = VPMOVSXWDZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVSXWQYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMOVSXWQZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMOVSXWQYrr %xmm0
- %ymm0 = VPMOVSXWQZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVZXBDYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMOVZXBDZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMOVZXBDYrr %xmm0
- %ymm0 = VPMOVZXBDZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVZXBQYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMOVZXBQZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMOVZXBQYrr %xmm0
- %ymm0 = VPMOVZXBQZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVZXBWYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMOVZXBWZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMOVZXBWYrr %xmm0
- %ymm0 = VPMOVZXBWZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVZXDQYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMOVZXDQZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMOVZXDQYrr %xmm0
- %ymm0 = VPMOVZXDQZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVZXWDYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMOVZXWDZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMOVZXWDYrr %xmm0
- %ymm0 = VPMOVZXWDZ256rr %xmm0
- ; CHECK: %ymm0 = VPMOVZXWQYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPMOVZXWQZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPMOVZXWQYrr %xmm0
- %ymm0 = VPMOVZXWQZ256rr %xmm0
- ; CHECK: %ymm0 = VBROADCASTF128 %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VBROADCASTF32X4Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VBROADCASTSDYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VBROADCASTF32X2Z256m %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VBROADCASTSDYrr %xmm0
- %ymm0 = VBROADCASTF32X2Z256r %xmm0
- ; CHECK: %ymm0 = VBROADCASTSDYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VBROADCASTSDZ256m %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VBROADCASTSDYrr %xmm0
- %ymm0 = VBROADCASTSDZ256r %xmm0
- ; CHECK: %ymm0 = VBROADCASTSSYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VBROADCASTSSZ256m %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VBROADCASTSSYrr %xmm0
- %ymm0 = VBROADCASTSSZ256r %xmm0
- ; CHECK: %ymm0 = VPBROADCASTBYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPBROADCASTBZ256m %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPBROADCASTBYrr %xmm0
- %ymm0 = VPBROADCASTBZ256r %xmm0
- ; CHECK: %ymm0 = VPBROADCASTDYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPBROADCASTDZ256m %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPBROADCASTDYrr %xmm0
- %ymm0 = VPBROADCASTDZ256r %xmm0
- ; CHECK: %ymm0 = VPBROADCASTWYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPBROADCASTWZ256m %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPBROADCASTWYrr %xmm0
- %ymm0 = VPBROADCASTWZ256r %xmm0
- ; CHECK: %ymm0 = VBROADCASTI128 %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VBROADCASTI32X4Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPBROADCASTQYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VBROADCASTI32X2Z256m %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPBROADCASTQYrr %xmm0
- %ymm0 = VBROADCASTI32X2Z256r %xmm0
- ; CHECK: %ymm0 = VPBROADCASTQYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPBROADCASTQZ256m %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPBROADCASTQYrr %xmm0
- %ymm0 = VPBROADCASTQZ256r %xmm0
- ; CHECK: %ymm0 = VPABSBYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPABSBZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPABSBYrr %ymm0
- %ymm0 = VPABSBZ256rr %ymm0
- ; CHECK: %ymm0 = VPABSDYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPABSDZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPABSDYrr %ymm0
- %ymm0 = VPABSDZ256rr %ymm0
- ; CHECK: %ymm0 = VPABSWYrm %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPABSWZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPABSWYrr %ymm0
- %ymm0 = VPABSWZ256rr %ymm0
- ; CHECK: %ymm0 = VPSADBWYrm %ymm0, 1, %noreg, %rax, %noreg, %noreg
- %ymm0 = VPSADBWZ256rm %ymm0, 1, %noreg, %rax, %noreg, %noreg
- ; CHECK: %ymm0 = VPSADBWYrr %ymm0, %ymm1
- %ymm0 = VPSADBWZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPERMDYrm %ymm0, %rdi, 1, %noreg, 0, %noreg
- %ymm0 = VPERMDZ256rm %ymm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VPERMDYrr %ymm1, %ymm0
- %ymm0 = VPERMDZ256rr %ymm1, %ymm0
- ; CHECK: %ymm0 = VPERMILPDYmi %rdi, 1, %noreg, 0, %noreg, %noreg
- %ymm0 = VPERMILPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %ymm0 = VPERMILPDYri %ymm0, 7
- %ymm0 = VPERMILPDZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPERMILPDYrm %ymm0, %rdi, 1, %noreg, 0, %noreg
- %ymm0 = VPERMILPDZ256rm %ymm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VPERMILPDYrr %ymm1, %ymm0
- %ymm0 = VPERMILPDZ256rr %ymm1, %ymm0
- ; CHECK: %ymm0 = VPERMILPSYmi %rdi, 1, %noreg, 0, %noreg, %noreg
- %ymm0 = VPERMILPSZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %ymm0 = VPERMILPSYri %ymm0, 7
- %ymm0 = VPERMILPSZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPERMILPSYrm %ymm0, %rdi, 1, %noreg, 0, %noreg
- %ymm0 = VPERMILPSZ256rm %ymm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VPERMILPSYrr %ymm1, %ymm0
- %ymm0 = VPERMILPSZ256rr %ymm1, %ymm0
- ; CHECK: %ymm0 = VPERMPDYmi %rdi, 1, %noreg, 0, %noreg, %noreg
- %ymm0 = VPERMPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %ymm0 = VPERMPDYri %ymm0, 7
- %ymm0 = VPERMPDZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPERMPSYrm %ymm0, %rdi, 1, %noreg, 0, %noreg
- %ymm0 = VPERMPSZ256rm %ymm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VPERMPSYrr %ymm1, %ymm0
- %ymm0 = VPERMPSZ256rr %ymm1, %ymm0
- ; CHECK: %ymm0 = VPERMQYmi %rdi, 1, %noreg, 0, %noreg, %noreg
- %ymm0 = VPERMQZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %ymm0 = VPERMQYri %ymm0, 7
- %ymm0 = VPERMQZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSLLDQYri %ymm0, 14
- %ymm0 = VPSLLDQZ256rr %ymm0, 14
- ; CHECK: %ymm0 = VPSLLDYri %ymm0, 7
- %ymm0 = VPSLLDZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSLLDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSLLDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSLLDYrr %ymm0, 14
- %ymm0 = VPSLLDZ256rr %ymm0, 14
- ; CHECK: %ymm0 = VPSLLQYri %ymm0, 7
- %ymm0 = VPSLLQZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSLLQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSLLQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSLLQYrr %ymm0, 14
- %ymm0 = VPSLLQZ256rr %ymm0, 14
- ; CHECK: %ymm0 = VPSLLVDYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSLLVDZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSLLVDYrr %ymm0, 14
- %ymm0 = VPSLLVDZ256rr %ymm0, 14
- ; CHECK: %ymm0 = VPSLLVQYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSLLVQZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSLLVQYrr %ymm0, 14
- %ymm0 = VPSLLVQZ256rr %ymm0, 14
- ; CHECK: %ymm0 = VPSLLWYri %ymm0, 7
- %ymm0 = VPSLLWZ256ri %ymm0, 7
- ; CHECK: %ymm0 = VPSLLWYrm %ymm0, %rip, 1, %noreg, %rax, %noreg
- %ymm0 = VPSLLWZ256rm %ymm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm0 = VPSLLWYrr %ymm0, 14
- %ymm0 = VPSLLWZ256rr %ymm0, 14
- ; CHECK: %ymm0 = VCVTDQ2PDYrm %rdi, %ymm0, 1, %noreg, 0
- %ymm0 = VCVTDQ2PDZ256rm %rdi, %ymm0, 1, %noreg, 0
- ; CHECK: %ymm0 = VCVTDQ2PDYrr %xmm0
- %ymm0 = VCVTDQ2PDZ256rr %xmm0
- ; CHECK: %ymm0 = VCVTDQ2PSYrm %rdi, %ymm0, 1, %noreg, 0
- %ymm0 = VCVTDQ2PSZ256rm %rdi, %ymm0, 1, %noreg, 0
- ; CHECK: %ymm0 = VCVTDQ2PSYrr %ymm0
- %ymm0 = VCVTDQ2PSZ256rr %ymm0
- ; CHECK: %xmm0 = VCVTPD2DQYrm %rdi, %ymm0, 1, %noreg, 0
- %xmm0 = VCVTPD2DQZ256rm %rdi, %ymm0, 1, %noreg, 0
- ; CHECK: %xmm0 = VCVTPD2DQYrr %ymm0
- %xmm0 = VCVTPD2DQZ256rr %ymm0
- ; CHECK: %xmm0 = VCVTPD2PSYrm %rdi, %ymm0, 1, %noreg, 0
- %xmm0 = VCVTPD2PSZ256rm %rdi, %ymm0, 1, %noreg, 0
- ; CHECK: %xmm0 = VCVTPD2PSYrr %ymm0
- %xmm0 = VCVTPD2PSZ256rr %ymm0
- ; CHECK: %ymm0 = VCVTPS2DQYrm %rdi, %ymm0, 1, %noreg, 0
- %ymm0 = VCVTPS2DQZ256rm %rdi, %ymm0, 1, %noreg, 0
- ; CHECK: %ymm0 = VCVTPS2DQYrr %ymm0
- %ymm0 = VCVTPS2DQZ256rr %ymm0
- ; CHECK: %ymm0 = VCVTPS2PDYrm %rdi, %ymm0, 1, %noreg, 0
- %ymm0 = VCVTPS2PDZ256rm %rdi, %ymm0, 1, %noreg, 0
- ; CHECK: %ymm0 = VCVTPS2PDYrr %xmm0
- %ymm0 = VCVTPS2PDZ256rr %xmm0
- ; CHECK: VCVTPS2PHYmr %rdi, %ymm0, 1, %noreg, 0, %noreg, %noreg
- VCVTPS2PHZ256mr %rdi, %ymm0, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm0 = VCVTPS2PHYrr %ymm0, %noreg
- %xmm0 = VCVTPS2PHZ256rr %ymm0, %noreg
- ; CHECK: %ymm0 = VCVTPH2PSYrm %rdi, %ymm0, 1, %noreg, 0
- %ymm0 = VCVTPH2PSZ256rm %rdi, %ymm0, 1, %noreg, 0
- ; CHECK: %ymm0 = VCVTPH2PSYrr %xmm0
- %ymm0 = VCVTPH2PSZ256rr %xmm0
- ; CHECK: %xmm0 = VCVTTPD2DQYrm %rdi, %ymm0, 1, %noreg, 0
- %xmm0 = VCVTTPD2DQZ256rm %rdi, %ymm0, 1, %noreg, 0
- ; CHECK: %xmm0 = VCVTTPD2DQYrr %ymm0
- %xmm0 = VCVTTPD2DQZ256rr %ymm0
- ; CHECK: %ymm0 = VCVTTPS2DQYrm %rdi, %ymm0, 1, %noreg, 0
- %ymm0 = VCVTTPS2DQZ256rm %rdi, %ymm0, 1, %noreg, 0
- ; CHECK: %ymm0 = VCVTTPS2DQYrr %ymm0
- %ymm0 = VCVTTPS2DQZ256rr %ymm0
- ; CHECK: %ymm0 = VSQRTPDYm %rdi, %noreg, %noreg, %noreg, %noreg
- %ymm0 = VSQRTPDZ256m %rdi, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %ymm0 = VSQRTPDYr %ymm0
- %ymm0 = VSQRTPDZ256r %ymm0
- ; CHECK: %ymm0 = VSQRTPSYm %rdi, %noreg, %noreg, %noreg, %noreg
- %ymm0 = VSQRTPSZ256m %rdi, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %ymm0 = VSQRTPSYr %ymm0
- %ymm0 = VSQRTPSZ256r %ymm0
- ; CHECK: %ymm0 = VPALIGNRYrmi %ymm0, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
- %ymm0 = VPALIGNRZ256rmi %ymm0, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %ymm0 = VPALIGNRYrri %ymm0, %ymm1, %noreg
- %ymm0 = VPALIGNRZ256rri %ymm0, %ymm1, %noreg
- ; CHECK: %ymm0 = VMOVUPSYrm %rdi, 1, %noreg, 0, %noreg
- %ymm0 = VMOVUPSZ256rm %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm0 = VMOVUPSYrr %ymm0
- %ymm0 = VMOVUPSZ256rr %ymm0
- ; CHECK: %ymm0 = VMOVUPSYrr_REV %ymm0
- %ymm0 = VMOVUPSZ256rr_REV %ymm0
- ; CHECK: %ymm0 = VPSHUFBYrm %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg
- %ymm0 = VPSHUFBZ256rm %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %ymm0 = VPSHUFBYrr %ymm0, %ymm1
- %ymm0 = VPSHUFBZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VPSHUFDYmi %rdi, 1, %noreg, 0, %noreg, %noreg
- %ymm0 = VPSHUFDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %ymm0 = VPSHUFDYri %ymm0, -24
- %ymm0 = VPSHUFDZ256ri %ymm0, -24
- ; CHECK: %ymm0 = VPSHUFHWYmi %rdi, 1, %noreg, 0, %noreg, %noreg
- %ymm0 = VPSHUFHWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %ymm0 = VPSHUFHWYri %ymm0, -24
- %ymm0 = VPSHUFHWZ256ri %ymm0, -24
- ; CHECK: %ymm0 = VPSHUFLWYmi %rdi, 1, %noreg, 0, %noreg, %noreg
- %ymm0 = VPSHUFLWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %ymm0 = VPSHUFLWYri %ymm0, -24
- %ymm0 = VPSHUFLWZ256ri %ymm0, -24
- ; CHECK: %ymm0 = VSHUFPDYrmi %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- %ymm0 = VSHUFPDZ256rmi %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %ymm0 = VSHUFPDYrri %ymm0, %noreg, %noreg
- %ymm0 = VSHUFPDZ256rri %ymm0, %noreg, %noreg
- ; CHECK: %ymm0 = VSHUFPSYrmi %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- %ymm0 = VSHUFPSZ256rmi %ymm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %ymm0 = VSHUFPSYrri %ymm0, %noreg, %noreg
- %ymm0 = VSHUFPSZ256rri %ymm0, %noreg, %noreg
+ ; CHECK: VMOVAPDYmr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ VMOVAPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ ; CHECK: $ymm0 = VMOVAPDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMOVAPDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMOVAPDYrr $ymm0
+ $ymm0 = VMOVAPDZ256rr $ymm0
+ ; CHECK: $ymm0 = VMOVAPDYrr_REV $ymm0
+ $ymm0 = VMOVAPDZ256rr_REV $ymm0
+ ; CHECK: VMOVAPSYmr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ VMOVAPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ ; CHECK: $ymm0 = VMOVAPSYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMOVAPSZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMOVAPSYrr $ymm0
+ $ymm0 = VMOVAPSZ256rr $ymm0
+ ; CHECK: $ymm0 = VMOVAPSYrr_REV $ymm0
+ $ymm0 = VMOVAPSZ256rr_REV $ymm0
+ ; CHECK: $ymm0 = VMOVDDUPYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMOVDDUPZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMOVDDUPYrr $ymm0
+ $ymm0 = VMOVDDUPZ256rr $ymm0
+ ; CHECK: VMOVDQAYmr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ VMOVDQA32Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ ; CHECK: $ymm0 = VMOVDQAYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMOVDQA32Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMOVDQAYrr $ymm0
+ $ymm0 = VMOVDQA32Z256rr $ymm0
+ ; CHECK: $ymm0 = VMOVDQAYrr_REV $ymm0
+ $ymm0 = VMOVDQA32Z256rr_REV $ymm0
+ ; CHECK: VMOVDQAYmr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ VMOVDQA64Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ ; CHECK: $ymm0 = VMOVDQAYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMOVDQA64Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMOVDQAYrr $ymm0
+ $ymm0 = VMOVDQA64Z256rr $ymm0
+ ; CHECK: $ymm0 = VMOVDQAYrr_REV $ymm0
+ $ymm0 = VMOVDQA64Z256rr_REV $ymm0
+ ; CHECK: VMOVDQUYmr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ VMOVDQU16Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ ; CHECK: $ymm0 = VMOVDQUYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMOVDQU16Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMOVDQUYrr $ymm0
+ $ymm0 = VMOVDQU16Z256rr $ymm0
+ ; CHECK: $ymm0 = VMOVDQUYrr_REV $ymm0
+ $ymm0 = VMOVDQU16Z256rr_REV $ymm0
+ ; CHECK: VMOVDQUYmr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ VMOVDQU32Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ ; CHECK: $ymm0 = VMOVDQUYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMOVDQU32Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMOVDQUYrr $ymm0
+ $ymm0 = VMOVDQU32Z256rr $ymm0
+ ; CHECK: $ymm0 = VMOVDQUYrr_REV $ymm0
+ $ymm0 = VMOVDQU32Z256rr_REV $ymm0
+ ; CHECK: VMOVDQUYmr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ VMOVDQU64Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ ; CHECK: $ymm0 = VMOVDQUYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMOVDQU64Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMOVDQUYrr $ymm0
+ $ymm0 = VMOVDQU64Z256rr $ymm0
+ ; CHECK: $ymm0 = VMOVDQUYrr_REV $ymm0
+ $ymm0 = VMOVDQU64Z256rr_REV $ymm0
+ ; CHECK: VMOVDQUYmr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ VMOVDQU8Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ ; CHECK: $ymm0 = VMOVDQUYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMOVDQU8Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMOVDQUYrr $ymm0
+ $ymm0 = VMOVDQU8Z256rr $ymm0
+ ; CHECK: $ymm0 = VMOVDQUYrr_REV $ymm0
+ $ymm0 = VMOVDQU8Z256rr_REV $ymm0
+ ; CHECK: $ymm0 = VMOVNTDQAYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMOVNTDQAZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: VMOVNTDQYmr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ VMOVNTDQZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ ; CHECK: VMOVNTPDYmr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ VMOVNTPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ ; CHECK: VMOVNTPSYmr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ VMOVNTPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ ; CHECK: $ymm0 = VMOVSHDUPYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMOVSHDUPZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMOVSHDUPYrr $ymm0
+ $ymm0 = VMOVSHDUPZ256rr $ymm0
+ ; CHECK: $ymm0 = VMOVSLDUPYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMOVSLDUPZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMOVSLDUPYrr $ymm0
+ $ymm0 = VMOVSLDUPZ256rr $ymm0
+ ; CHECK: VMOVUPDYmr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ VMOVUPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ ; CHECK: $ymm0 = VMOVUPDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMOVUPDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMOVUPDYrr $ymm0
+ $ymm0 = VMOVUPDZ256rr $ymm0
+ ; CHECK: $ymm0 = VMOVUPDYrr_REV $ymm0
+ $ymm0 = VMOVUPDZ256rr_REV $ymm0
+ ; CHECK: VMOVUPSYmr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ VMOVUPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm0
+ ; CHECK: $ymm0 = VPANDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPANDDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPANDYrr $ymm0, $ymm1
+ $ymm0 = VPANDDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPANDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPANDQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPANDYrr $ymm0, $ymm1
+ $ymm0 = VPANDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPANDNYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPANDNDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPANDNYrr $ymm0, $ymm1
+ $ymm0 = VPANDNDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPANDNYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPANDNQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPANDNYrr $ymm0, $ymm1
+ $ymm0 = VPANDNQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPAVGBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPAVGBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPAVGBYrr $ymm0, $ymm1
+ $ymm0 = VPAVGBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPAVGWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPAVGWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPAVGWYrr $ymm0, $ymm1
+ $ymm0 = VPAVGWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPADDBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPADDBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPADDBYrr $ymm0, $ymm1
+ $ymm0 = VPADDBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPADDDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPADDDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPADDDYrr $ymm0, $ymm1
+ $ymm0 = VPADDDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPADDQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPADDQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPADDQYrr $ymm0, $ymm1
+ $ymm0 = VPADDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPADDSBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPADDSBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPADDSBYrr $ymm0, $ymm1
+ $ymm0 = VPADDSBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPADDSWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPADDSWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPADDSWYrr $ymm0, $ymm1
+ $ymm0 = VPADDSWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPADDUSBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPADDUSBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPADDUSBYrr $ymm0, $ymm1
+ $ymm0 = VPADDUSBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPADDUSWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPADDUSWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPADDUSWYrr $ymm0, $ymm1
+ $ymm0 = VPADDUSWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPADDWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPADDWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPADDWYrr $ymm0, $ymm1
+ $ymm0 = VPADDWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMULPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMULPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMULPDYrr $ymm0, $ymm1
+ $ymm0 = VMULPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMULPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMULPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMULPSYrr $ymm0, $ymm1
+ $ymm0 = VMULPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VORPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VORPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VORPDYrr $ymm0, $ymm1
+ $ymm0 = VORPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VORPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VORPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VORPSYrr $ymm0, $ymm1
+ $ymm0 = VORPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMADDUBSWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMADDUBSWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMADDUBSWYrr $ymm0, $ymm1
+ $ymm0 = VPMADDUBSWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMADDWDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMADDWDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMADDWDYrr $ymm0, $ymm1
+ $ymm0 = VPMADDWDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMAXSBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMAXSBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMAXSBYrr $ymm0, $ymm1
+ $ymm0 = VPMAXSBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMAXSDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMAXSDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMAXSDYrr $ymm0, $ymm1
+ $ymm0 = VPMAXSDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMAXSWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMAXSWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMAXSWYrr $ymm0, $ymm1
+ $ymm0 = VPMAXSWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMAXUBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMAXUBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMAXUBYrr $ymm0, $ymm1
+ $ymm0 = VPMAXUBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMAXUDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMAXUDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMAXUDYrr $ymm0, $ymm1
+ $ymm0 = VPMAXUDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMAXUWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMAXUWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMAXUWYrr $ymm0, $ymm1
+ $ymm0 = VPMAXUWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMINSBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMINSBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMINSBYrr $ymm0, $ymm1
+ $ymm0 = VPMINSBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMINSDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMINSDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMINSDYrr $ymm0, $ymm1
+ $ymm0 = VPMINSDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMINSWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMINSWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMINSWYrr $ymm0, $ymm1
+ $ymm0 = VPMINSWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMINUBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMINUBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMINUBYrr $ymm0, $ymm1
+ $ymm0 = VPMINUBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMINUDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMINUDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMINUDYrr $ymm0, $ymm1
+ $ymm0 = VPMINUDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMINUWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMINUWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMINUWYrr $ymm0, $ymm1
+ $ymm0 = VPMINUWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMULDQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMULDQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMULDQYrr $ymm0, $ymm1
+ $ymm0 = VPMULDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMULHRSWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMULHRSWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMULHRSWYrr $ymm0, $ymm1
+ $ymm0 = VPMULHRSWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMULHUWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMULHUWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMULHUWYrr $ymm0, $ymm1
+ $ymm0 = VPMULHUWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMULHWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMULHWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMULHWYrr $ymm0, $ymm1
+ $ymm0 = VPMULHWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMULLDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMULLDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMULLDYrr $ymm0, $ymm1
+ $ymm0 = VPMULLDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMULLWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMULLWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMULLWYrr $ymm0, $ymm1
+ $ymm0 = VPMULLWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPMULUDQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMULUDQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMULUDQYrr $ymm0, $ymm1
+ $ymm0 = VPMULUDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPORYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPORDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPORYrr $ymm0, $ymm1
+ $ymm0 = VPORDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPORYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPORQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPORYrr $ymm0, $ymm1
+ $ymm0 = VPORQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSUBBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSUBBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSUBBYrr $ymm0, $ymm1
+ $ymm0 = VPSUBBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSUBDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSUBDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSUBDYrr $ymm0, $ymm1
+ $ymm0 = VPSUBDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSUBQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSUBQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSUBQYrr $ymm0, $ymm1
+ $ymm0 = VPSUBQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSUBSBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSUBSBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSUBSBYrr $ymm0, $ymm1
+ $ymm0 = VPSUBSBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSUBSWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSUBSWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSUBSWYrr $ymm0, $ymm1
+ $ymm0 = VPSUBSWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSUBUSBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSUBUSBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSUBUSBYrr $ymm0, $ymm1
+ $ymm0 = VPSUBUSBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSUBUSWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSUBUSWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSUBUSWYrr $ymm0, $ymm1
+ $ymm0 = VPSUBUSWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSUBWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSUBWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSUBWYrr $ymm0, $ymm1
+ $ymm0 = VPSUBWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPXORYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPXORDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPXORYrr $ymm0, $ymm1
+ $ymm0 = VPXORDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPXORYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPXORQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPXORYrr $ymm0, $ymm1
+ $ymm0 = VPXORQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VADDPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VADDPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VADDPDYrr $ymm0, $ymm1
+ $ymm0 = VADDPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VADDPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VADDPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VADDPSYrr $ymm0, $ymm1
+ $ymm0 = VADDPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VANDNPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VANDNPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VANDNPDYrr $ymm0, $ymm1
+ $ymm0 = VANDNPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VANDNPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VANDNPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VANDNPSYrr $ymm0, $ymm1
+ $ymm0 = VANDNPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VANDPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VANDPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VANDPDYrr $ymm0, $ymm1
+ $ymm0 = VANDPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VANDPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VANDPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VANDPSYrr $ymm0, $ymm1
+ $ymm0 = VANDPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VDIVPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VDIVPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VDIVPDYrr $ymm0, $ymm1
+ $ymm0 = VDIVPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VDIVPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VDIVPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VDIVPSYrr $ymm0, $ymm1
+ $ymm0 = VDIVPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMAXCPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMAXCPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMAXCPDYrr $ymm0, $ymm1
+ $ymm0 = VMAXCPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMAXCPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMAXCPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMAXCPSYrr $ymm0, $ymm1
+ $ymm0 = VMAXCPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMAXCPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMAXPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMAXCPDYrr $ymm0, $ymm1
+ $ymm0 = VMAXPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMAXCPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMAXPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMAXCPSYrr $ymm0, $ymm1
+ $ymm0 = VMAXPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMINCPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMINCPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMINCPDYrr $ymm0, $ymm1
+ $ymm0 = VMINCPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMINCPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMINCPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMINCPSYrr $ymm0, $ymm1
+ $ymm0 = VMINCPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMINCPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMINPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMINCPDYrr $ymm0, $ymm1
+ $ymm0 = VMINPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VMINCPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VMINPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VMINCPSYrr $ymm0, $ymm1
+ $ymm0 = VMINPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VXORPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VXORPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VXORPDYrr $ymm0, $ymm1
+ $ymm0 = VXORPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VXORPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VXORPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VXORPSYrr $ymm0, $ymm1
+ $ymm0 = VXORPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPACKSSDWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPACKSSDWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPACKSSDWYrr $ymm0, $ymm1
+ $ymm0 = VPACKSSDWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPACKSSWBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPACKSSWBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPACKSSWBYrr $ymm0, $ymm1
+ $ymm0 = VPACKSSWBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPACKUSDWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPACKUSDWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPACKUSDWYrr $ymm0, $ymm1
+ $ymm0 = VPACKUSDWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPACKUSWBYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPACKUSWBZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPACKUSWBYrr $ymm0, $ymm1
+ $ymm0 = VPACKUSWBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VUNPCKHPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VUNPCKHPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VUNPCKHPDYrr $ymm0, $ymm1
+ $ymm0 = VUNPCKHPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VUNPCKHPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VUNPCKHPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VUNPCKHPSYrr $ymm0, $ymm1
+ $ymm0 = VUNPCKHPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VUNPCKLPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VUNPCKLPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VUNPCKLPDYrr $ymm0, $ymm1
+ $ymm0 = VUNPCKLPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VUNPCKLPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VUNPCKLPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VUNPCKLPSYrr $ymm0, $ymm1
+ $ymm0 = VUNPCKLPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VSUBPDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VSUBPDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VSUBPDYrr $ymm0, $ymm1
+ $ymm0 = VSUBPDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VSUBPSYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VSUBPSZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VSUBPSYrr $ymm0, $ymm1
+ $ymm0 = VSUBPSZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPUNPCKHBWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPUNPCKHBWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPUNPCKHBWYrr $ymm0, $ymm1
+ $ymm0 = VPUNPCKHBWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPUNPCKHDQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPUNPCKHDQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPUNPCKHDQYrr $ymm0, $ymm1
+ $ymm0 = VPUNPCKHDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPUNPCKHQDQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPUNPCKHQDQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPUNPCKHQDQYrr $ymm0, $ymm1
+ $ymm0 = VPUNPCKHQDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPUNPCKHWDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPUNPCKHWDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPUNPCKHWDYrr $ymm0, $ymm1
+ $ymm0 = VPUNPCKHWDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPUNPCKLBWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPUNPCKLBWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPUNPCKLBWYrr $ymm0, $ymm1
+ $ymm0 = VPUNPCKLBWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPUNPCKLDQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPUNPCKLDQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPUNPCKLDQYrr $ymm0, $ymm1
+ $ymm0 = VPUNPCKLDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPUNPCKLQDQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPUNPCKLQDQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPUNPCKLQDQYrr $ymm0, $ymm1
+ $ymm0 = VPUNPCKLQDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPUNPCKLWDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPUNPCKLWDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPUNPCKLWDYrr $ymm0, $ymm1
+ $ymm0 = VPUNPCKLWDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VFMADD132PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADD132PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADD132PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADD132PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADD132PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADD132PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADD132PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADD132PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADD213PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADD213PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADD213PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADD213PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADD213PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADD213PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADD213PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADD213PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADD231PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADD231PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADD231PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADD231PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADD231PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADD231PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADD231PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADD231PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADDSUB132PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADDSUB132PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADDSUB132PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADDSUB132PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADDSUB132PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADDSUB132PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADDSUB132PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADDSUB132PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADDSUB213PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADDSUB213PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADDSUB213PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADDSUB213PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADDSUB213PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADDSUB213PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADDSUB213PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADDSUB213PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADDSUB231PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADDSUB231PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADDSUB231PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADDSUB231PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMADDSUB231PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMADDSUB231PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMADDSUB231PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMADDSUB231PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUB132PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUB132PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUB132PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUB132PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUB132PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUB132PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUB132PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUB132PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUB213PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUB213PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUB213PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUB213PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUB213PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUB213PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUB213PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUB213PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUB231PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUB231PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUB231PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUB231PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUB231PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUB231PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUB231PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUB231PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUBADD132PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUBADD132PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUBADD132PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUBADD132PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUBADD132PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUBADD132PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUBADD132PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUBADD132PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUBADD213PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUBADD213PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUBADD213PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUBADD213PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUBADD213PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUBADD213PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUBADD213PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUBADD213PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUBADD231PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUBADD231PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUBADD231PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUBADD231PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFMSUBADD231PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFMSUBADD231PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFMSUBADD231PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFMSUBADD231PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMADD132PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMADD132PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMADD132PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMADD132PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMADD132PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMADD132PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMADD132PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMADD132PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMADD213PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMADD213PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMADD213PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMADD213PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMADD213PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMADD213PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMADD213PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMADD213PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMADD231PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMADD231PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMADD231PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMADD231PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMADD231PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMADD231PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMADD231PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMADD231PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMSUB132PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMSUB132PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMSUB132PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMSUB132PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMSUB132PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMSUB132PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMSUB132PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMSUB132PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMSUB213PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMSUB213PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMSUB213PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMSUB213PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMSUB213PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMSUB213PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMSUB213PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMSUB213PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMSUB231PDYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMSUB231PDZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMSUB231PDYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMSUB231PDZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VFNMSUB231PSYm $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ $ymm0 = VFNMSUB231PSZ256m $ymm0, $ymm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VFNMSUB231PSYr $ymm0, $ymm1, $ymm2
+ $ymm0 = VFNMSUB231PSZ256r $ymm0, $ymm1, $ymm2
+ ; CHECK: $ymm0 = VPSRADYri $ymm0, 7
+ $ymm0 = VPSRADZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSRADYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSRADZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSRADYrr $ymm0, $xmm1
+ $ymm0 = VPSRADZ256rr $ymm0, $xmm1
+ ; CHECK: $ymm0 = VPSRAVDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSRAVDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSRAVDYrr $ymm0, $ymm1
+ $ymm0 = VPSRAVDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSRAWYri $ymm0, 7
+ $ymm0 = VPSRAWZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSRAWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSRAWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSRAWYrr $ymm0, $xmm1
+ $ymm0 = VPSRAWZ256rr $ymm0, $xmm1
+ ; CHECK: $ymm0 = VPSRLDQYri $ymm0, $ymm1
+ $ymm0 = VPSRLDQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSRLDYri $ymm0, 7
+ $ymm0 = VPSRLDZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSRLDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSRLDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSRLDYrr $ymm0, $xmm1
+ $ymm0 = VPSRLDZ256rr $ymm0, $xmm1
+ ; CHECK: $ymm0 = VPSRLQYri $ymm0, 7
+ $ymm0 = VPSRLQZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSRLQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSRLQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSRLQYrr $ymm0, $xmm1
+ $ymm0 = VPSRLQZ256rr $ymm0, $xmm1
+ ; CHECK: $ymm0 = VPSRLVDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSRLVDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSRLVDYrr $ymm0, $ymm1
+ $ymm0 = VPSRLVDZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSRLVQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSRLVQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSRLVQYrr $ymm0, $ymm1
+ $ymm0 = VPSRLVQZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSRLWYri $ymm0, 7
+ $ymm0 = VPSRLWZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSRLWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSRLWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSRLWYrr $ymm0, $xmm1
+ $ymm0 = VPSRLWZ256rr $ymm0, $xmm1
+ ; CHECK: $ymm0 = VPMOVSXBDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVSXBDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVSXBDYrr $xmm0
+ $ymm0 = VPMOVSXBDZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVSXBQYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVSXBQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVSXBQYrr $xmm0
+ $ymm0 = VPMOVSXBQZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVSXBWYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVSXBWZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVSXBWYrr $xmm0
+ $ymm0 = VPMOVSXBWZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVSXDQYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVSXDQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVSXDQYrr $xmm0
+ $ymm0 = VPMOVSXDQZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVSXWDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVSXWDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVSXWDYrr $xmm0
+ $ymm0 = VPMOVSXWDZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVSXWQYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVSXWQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVSXWQYrr $xmm0
+ $ymm0 = VPMOVSXWQZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVZXBDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVZXBDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVZXBDYrr $xmm0
+ $ymm0 = VPMOVZXBDZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVZXBQYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVZXBQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVZXBQYrr $xmm0
+ $ymm0 = VPMOVZXBQZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVZXBWYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVZXBWZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVZXBWYrr $xmm0
+ $ymm0 = VPMOVZXBWZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVZXDQYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVZXDQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVZXDQYrr $xmm0
+ $ymm0 = VPMOVZXDQZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVZXWDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVZXWDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVZXWDYrr $xmm0
+ $ymm0 = VPMOVZXWDZ256rr $xmm0
+ ; CHECK: $ymm0 = VPMOVZXWQYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPMOVZXWQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPMOVZXWQYrr $xmm0
+ $ymm0 = VPMOVZXWQZ256rr $xmm0
+ ; CHECK: $ymm0 = VBROADCASTF128 $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VBROADCASTF32X4Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VBROADCASTSDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VBROADCASTF32X2Z256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VBROADCASTSDYrr $xmm0
+ $ymm0 = VBROADCASTF32X2Z256r $xmm0
+ ; CHECK: $ymm0 = VBROADCASTSDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VBROADCASTSDZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VBROADCASTSDYrr $xmm0
+ $ymm0 = VBROADCASTSDZ256r $xmm0
+ ; CHECK: $ymm0 = VBROADCASTSSYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VBROADCASTSSZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VBROADCASTSSYrr $xmm0
+ $ymm0 = VBROADCASTSSZ256r $xmm0
+ ; CHECK: $ymm0 = VPBROADCASTBYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPBROADCASTBZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPBROADCASTBYrr $xmm0
+ $ymm0 = VPBROADCASTBZ256r $xmm0
+ ; CHECK: $ymm0 = VPBROADCASTDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPBROADCASTDZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPBROADCASTDYrr $xmm0
+ $ymm0 = VPBROADCASTDZ256r $xmm0
+ ; CHECK: $ymm0 = VPBROADCASTWYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPBROADCASTWZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPBROADCASTWYrr $xmm0
+ $ymm0 = VPBROADCASTWZ256r $xmm0
+ ; CHECK: $ymm0 = VBROADCASTI128 $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VBROADCASTI32X4Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPBROADCASTQYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VBROADCASTI32X2Z256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPBROADCASTQYrr $xmm0
+ $ymm0 = VBROADCASTI32X2Z256r $xmm0
+ ; CHECK: $ymm0 = VPBROADCASTQYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPBROADCASTQZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPBROADCASTQYrr $xmm0
+ $ymm0 = VPBROADCASTQZ256r $xmm0
+ ; CHECK: $ymm0 = VPABSBYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPABSBZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPABSBYrr $ymm0
+ $ymm0 = VPABSBZ256rr $ymm0
+ ; CHECK: $ymm0 = VPABSDYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPABSDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPABSDYrr $ymm0
+ $ymm0 = VPABSDZ256rr $ymm0
+ ; CHECK: $ymm0 = VPABSWYrm $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPABSWZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPABSWYrr $ymm0
+ $ymm0 = VPABSWZ256rr $ymm0
+ ; CHECK: $ymm0 = VPSADBWYrm $ymm0, 1, $noreg, $rax, $noreg, $noreg
+ $ymm0 = VPSADBWZ256rm $ymm0, 1, $noreg, $rax, $noreg, $noreg
+ ; CHECK: $ymm0 = VPSADBWYrr $ymm0, $ymm1
+ $ymm0 = VPSADBWZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPERMDYrm $ymm0, $rdi, 1, $noreg, 0, $noreg
+ $ymm0 = VPERMDZ256rm $ymm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VPERMDYrr $ymm1, $ymm0
+ $ymm0 = VPERMDZ256rr $ymm1, $ymm0
+ ; CHECK: $ymm0 = VPERMILPDYmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm0 = VPERMILPDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm0 = VPERMILPDYri $ymm0, 7
+ $ymm0 = VPERMILPDZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPERMILPDYrm $ymm0, $rdi, 1, $noreg, 0, $noreg
+ $ymm0 = VPERMILPDZ256rm $ymm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VPERMILPDYrr $ymm1, $ymm0
+ $ymm0 = VPERMILPDZ256rr $ymm1, $ymm0
+ ; CHECK: $ymm0 = VPERMILPSYmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm0 = VPERMILPSZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm0 = VPERMILPSYri $ymm0, 7
+ $ymm0 = VPERMILPSZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPERMILPSYrm $ymm0, $rdi, 1, $noreg, 0, $noreg
+ $ymm0 = VPERMILPSZ256rm $ymm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VPERMILPSYrr $ymm1, $ymm0
+ $ymm0 = VPERMILPSZ256rr $ymm1, $ymm0
+ ; CHECK: $ymm0 = VPERMPDYmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm0 = VPERMPDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm0 = VPERMPDYri $ymm0, 7
+ $ymm0 = VPERMPDZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPERMPSYrm $ymm0, $rdi, 1, $noreg, 0, $noreg
+ $ymm0 = VPERMPSZ256rm $ymm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VPERMPSYrr $ymm1, $ymm0
+ $ymm0 = VPERMPSZ256rr $ymm1, $ymm0
+ ; CHECK: $ymm0 = VPERMQYmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm0 = VPERMQZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm0 = VPERMQYri $ymm0, 7
+ $ymm0 = VPERMQZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSLLDQYri $ymm0, 14
+ $ymm0 = VPSLLDQZ256rr $ymm0, 14
+ ; CHECK: $ymm0 = VPSLLDYri $ymm0, 7
+ $ymm0 = VPSLLDZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSLLDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSLLDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSLLDYrr $ymm0, 14
+ $ymm0 = VPSLLDZ256rr $ymm0, 14
+ ; CHECK: $ymm0 = VPSLLQYri $ymm0, 7
+ $ymm0 = VPSLLQZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSLLQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSLLQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSLLQYrr $ymm0, 14
+ $ymm0 = VPSLLQZ256rr $ymm0, 14
+ ; CHECK: $ymm0 = VPSLLVDYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSLLVDZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSLLVDYrr $ymm0, 14
+ $ymm0 = VPSLLVDZ256rr $ymm0, 14
+ ; CHECK: $ymm0 = VPSLLVQYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSLLVQZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSLLVQYrr $ymm0, 14
+ $ymm0 = VPSLLVQZ256rr $ymm0, 14
+ ; CHECK: $ymm0 = VPSLLWYri $ymm0, 7
+ $ymm0 = VPSLLWZ256ri $ymm0, 7
+ ; CHECK: $ymm0 = VPSLLWYrm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ $ymm0 = VPSLLWZ256rm $ymm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm0 = VPSLLWYrr $ymm0, 14
+ $ymm0 = VPSLLWZ256rr $ymm0, 14
+ ; CHECK: $ymm0 = VCVTDQ2PDYrm $rdi, $ymm0, 1, $noreg, 0
+ $ymm0 = VCVTDQ2PDZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $ymm0 = VCVTDQ2PDYrr $xmm0
+ $ymm0 = VCVTDQ2PDZ256rr $xmm0
+ ; CHECK: $ymm0 = VCVTDQ2PSYrm $rdi, $ymm0, 1, $noreg, 0
+ $ymm0 = VCVTDQ2PSZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $ymm0 = VCVTDQ2PSYrr $ymm0
+ $ymm0 = VCVTDQ2PSZ256rr $ymm0
+ ; CHECK: $xmm0 = VCVTPD2DQYrm $rdi, $ymm0, 1, $noreg, 0
+ $xmm0 = VCVTPD2DQZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTPD2DQYrr $ymm0
+ $xmm0 = VCVTPD2DQZ256rr $ymm0
+ ; CHECK: $xmm0 = VCVTPD2PSYrm $rdi, $ymm0, 1, $noreg, 0
+ $xmm0 = VCVTPD2PSZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTPD2PSYrr $ymm0
+ $xmm0 = VCVTPD2PSZ256rr $ymm0
+ ; CHECK: $ymm0 = VCVTPS2DQYrm $rdi, $ymm0, 1, $noreg, 0
+ $ymm0 = VCVTPS2DQZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $ymm0 = VCVTPS2DQYrr $ymm0
+ $ymm0 = VCVTPS2DQZ256rr $ymm0
+ ; CHECK: $ymm0 = VCVTPS2PDYrm $rdi, $ymm0, 1, $noreg, 0
+ $ymm0 = VCVTPS2PDZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $ymm0 = VCVTPS2PDYrr $xmm0
+ $ymm0 = VCVTPS2PDZ256rr $xmm0
+ ; CHECK: VCVTPS2PHYmr $rdi, $ymm0, 1, $noreg, 0, $noreg, $noreg
+ VCVTPS2PHZ256mr $rdi, $ymm0, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm0 = VCVTPS2PHYrr $ymm0, $noreg
+ $xmm0 = VCVTPS2PHZ256rr $ymm0, $noreg
+ ; CHECK: $ymm0 = VCVTPH2PSYrm $rdi, $ymm0, 1, $noreg, 0
+ $ymm0 = VCVTPH2PSZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $ymm0 = VCVTPH2PSYrr $xmm0
+ $ymm0 = VCVTPH2PSZ256rr $xmm0
+ ; CHECK: $xmm0 = VCVTTPD2DQYrm $rdi, $ymm0, 1, $noreg, 0
+ $xmm0 = VCVTTPD2DQZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTTPD2DQYrr $ymm0
+ $xmm0 = VCVTTPD2DQZ256rr $ymm0
+ ; CHECK: $ymm0 = VCVTTPS2DQYrm $rdi, $ymm0, 1, $noreg, 0
+ $ymm0 = VCVTTPS2DQZ256rm $rdi, $ymm0, 1, $noreg, 0
+ ; CHECK: $ymm0 = VCVTTPS2DQYrr $ymm0
+ $ymm0 = VCVTTPS2DQZ256rr $ymm0
+ ; CHECK: $ymm0 = VSQRTPDYm $rdi, $noreg, $noreg, $noreg, $noreg
+ $ymm0 = VSQRTPDZ256m $rdi, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm0 = VSQRTPDYr $ymm0
+ $ymm0 = VSQRTPDZ256r $ymm0
+ ; CHECK: $ymm0 = VSQRTPSYm $rdi, $noreg, $noreg, $noreg, $noreg
+ $ymm0 = VSQRTPSZ256m $rdi, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm0 = VSQRTPSYr $ymm0
+ $ymm0 = VSQRTPSZ256r $ymm0
+ ; CHECK: $ymm0 = VPALIGNRYrmi $ymm0, $rdi, $noreg, $noreg, $noreg, $noreg, $noreg
+ $ymm0 = VPALIGNRZ256rmi $ymm0, $rdi, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm0 = VPALIGNRYrri $ymm0, $ymm1, $noreg
+ $ymm0 = VPALIGNRZ256rri $ymm0, $ymm1, $noreg
+ ; CHECK: $ymm0 = VMOVUPSYrm $rdi, 1, $noreg, 0, $noreg
+ $ymm0 = VMOVUPSZ256rm $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm0 = VMOVUPSYrr $ymm0
+ $ymm0 = VMOVUPSZ256rr $ymm0
+ ; CHECK: $ymm0 = VMOVUPSYrr_REV $ymm0
+ $ymm0 = VMOVUPSZ256rr_REV $ymm0
+ ; CHECK: $ymm0 = VPSHUFBYrm $ymm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ $ymm0 = VPSHUFBZ256rm $ymm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm0 = VPSHUFBYrr $ymm0, $ymm1
+ $ymm0 = VPSHUFBZ256rr $ymm0, $ymm1
+ ; CHECK: $ymm0 = VPSHUFDYmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm0 = VPSHUFDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm0 = VPSHUFDYri $ymm0, -24
+ $ymm0 = VPSHUFDZ256ri $ymm0, -24
+ ; CHECK: $ymm0 = VPSHUFHWYmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm0 = VPSHUFHWZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm0 = VPSHUFHWYri $ymm0, -24
+ $ymm0 = VPSHUFHWZ256ri $ymm0, -24
+ ; CHECK: $ymm0 = VPSHUFLWYmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm0 = VPSHUFLWZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm0 = VPSHUFLWYri $ymm0, -24
+ $ymm0 = VPSHUFLWZ256ri $ymm0, -24
+ ; CHECK: $ymm0 = VSHUFPDYrmi $ymm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ $ymm0 = VSHUFPDZ256rmi $ymm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm0 = VSHUFPDYrri $ymm0, $noreg, $noreg
+ $ymm0 = VSHUFPDZ256rri $ymm0, $noreg, $noreg
+ ; CHECK: $ymm0 = VSHUFPSYrmi $ymm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ $ymm0 = VSHUFPSZ256rmi $ymm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm0 = VSHUFPSYrri $ymm0, $noreg, $noreg
+ $ymm0 = VSHUFPSZ256rri $ymm0, $noreg, $noreg
- RET 0, %zmm0, %zmm1
+ RET 0, $zmm0, $zmm1
...
---
# CHECK-LABEL: name: evex_z128_to_vex_test
@@ -899,868 +899,868 @@
name: evex_z128_to_vex_test
body: |
bb.0:
- ; CHECK: VMOVAPDmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVAPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVAPDrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVAPDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVAPDrr %xmm0
- %xmm0 = VMOVAPDZ128rr %xmm0
- ; CHECK: VMOVAPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVAPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVAPSrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVAPSZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVAPSrr %xmm0
- %xmm0 = VMOVAPSZ128rr %xmm0
- ; CHECK: VMOVDQAmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVDQA32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVDQArm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVDQA32Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVDQArr %xmm0
- %xmm0 = VMOVDQA32Z128rr %xmm0
- ; CHECK: VMOVDQAmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVDQA64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVDQArm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVDQA64Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVDQArr %xmm0
- %xmm0 = VMOVDQA64Z128rr %xmm0
- ; CHECK: VMOVDQUmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVDQU16Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVDQUrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVDQU16Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVDQUrr %xmm0
- %xmm0 = VMOVDQU16Z128rr %xmm0
- ; CHECK: VMOVDQUmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVDQU32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVDQUrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVDQU32Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVDQUrr %xmm0
- %xmm0 = VMOVDQU32Z128rr %xmm0
- ; CHECK: VMOVDQUmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVDQU64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVDQUrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVDQU64Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVDQUrr %xmm0
- %xmm0 = VMOVDQU64Z128rr %xmm0
- ; CHECK: VMOVDQUmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVDQU8Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVDQUrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVDQU8Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVDQUrr %xmm0
- %xmm0 = VMOVDQU8Z128rr %xmm0
- ; CHECK: %xmm0 = VMOVDQUrr_REV %xmm0
- %xmm0 = VMOVDQU8Z128rr_REV %xmm0
- ; CHECK: %xmm0 = VMOVNTDQArm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVNTDQAZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: VMOVUPDmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVUPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVUPDrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVUPDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVUPDrr %xmm0
- %xmm0 = VMOVUPDZ128rr %xmm0
- ; CHECK: %xmm0 = VMOVUPDrr_REV %xmm0
- %xmm0 = VMOVUPDZ128rr_REV %xmm0
- ; CHECK: VMOVUPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVUPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVUPSrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMOVUPSZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMOVUPSrr %xmm0
- %xmm0 = VMOVUPSZ128rr %xmm0
- ; CHECK: %xmm0 = VMOVUPSrr_REV %xmm0
- %xmm0 = VMOVUPSZ128rr_REV %xmm0
- ; CHECK: VMOVNTDQmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVNTDQZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: VMOVNTPDmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVNTPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: VMOVNTPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVNTPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVAPDrr_REV %xmm0
- %xmm0 = VMOVAPDZ128rr_REV %xmm0
- ; CHECK: %xmm0 = VMOVAPSrr_REV %xmm0
- %xmm0 = VMOVAPSZ128rr_REV %xmm0
- ; CHECK: %xmm0 = VMOVDQArr_REV %xmm0
- %xmm0 = VMOVDQA32Z128rr_REV %xmm0
- ; CHECK: %xmm0 = VMOVDQArr_REV %xmm0
- %xmm0 = VMOVDQA64Z128rr_REV %xmm0
- ; CHECK: %xmm0 = VMOVDQUrr_REV %xmm0
- %xmm0 = VMOVDQU16Z128rr_REV %xmm0
- ; CHECK: %xmm0 = VMOVDQUrr_REV %xmm0
- %xmm0 = VMOVDQU32Z128rr_REV %xmm0
- ; CHECK: %xmm0 = VMOVDQUrr_REV %xmm0
- %xmm0 = VMOVDQU64Z128rr_REV %xmm0
- ; CHECK: %xmm0 = VPMOVSXBDrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVSXBDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVSXBDrr %xmm0
- %xmm0 = VPMOVSXBDZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVSXBQrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVSXBQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVSXBQrr %xmm0
- %xmm0 = VPMOVSXBQZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVSXBWrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVSXBWZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVSXBWrr %xmm0
- %xmm0 = VPMOVSXBWZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVSXDQrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVSXDQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVSXDQrr %xmm0
- %xmm0 = VPMOVSXDQZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVSXWDrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVSXWDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVSXWDrr %xmm0
- %xmm0 = VPMOVSXWDZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVSXWQrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVSXWQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVSXWQrr %xmm0
- %xmm0 = VPMOVSXWQZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXBDrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVZXBDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVZXBDrr %xmm0
- %xmm0 = VPMOVZXBDZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXBQrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVZXBQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVZXBQrr %xmm0
- %xmm0 = VPMOVZXBQZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXBWrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVZXBWZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVZXBWrr %xmm0
- %xmm0 = VPMOVZXBWZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXDQrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVZXDQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVZXDQrr %xmm0
- %xmm0 = VPMOVZXDQZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXWDrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVZXWDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVZXWDrr %xmm0
- %xmm0 = VPMOVZXWDZ128rr %xmm0
- ; CHECK: %xmm0 = VPMOVZXWQrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMOVZXWQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMOVZXWQrr %xmm0
- %xmm0 = VPMOVZXWQZ128rr %xmm0
- ; CHECK: VMOVHPDmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVHPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVHPDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VMOVHPDZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: VMOVHPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVHPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVHPSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VMOVHPSZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: VMOVLPDmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVLPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVLPDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VMOVLPDZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: VMOVLPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0
- VMOVLPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm0
- ; CHECK: %xmm0 = VMOVLPSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VMOVLPSZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VMAXCPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXCPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXCPDrr %xmm0, %xmm1
- %xmm0 = VMAXCPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXCPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXCPSrr %xmm0, %xmm1
- %xmm0 = VMAXCPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXCPDrr %xmm0, %xmm1
- %xmm0 = VMAXPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXCPSrr %xmm0, %xmm1
- %xmm0 = VMAXPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINCPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINCPDrr %xmm0, %xmm1
- %xmm0 = VMINCPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINCPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINCPSrr %xmm0, %xmm1
- %xmm0 = VMINCPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINCPDrr %xmm0, %xmm1
- %xmm0 = VMINPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINCPSrr %xmm0, %xmm1
- %xmm0 = VMINPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMULPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMULPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMULPDrr %xmm0, %xmm1
- %xmm0 = VMULPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMULPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMULPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMULPSrr %xmm0, %xmm1
- %xmm0 = VMULPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VORPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VORPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VORPDrr %xmm0, %xmm1
- %xmm0 = VORPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VORPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VORPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VORPSrr %xmm0, %xmm1
- %xmm0 = VORPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPADDBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPADDBrr %xmm0, %xmm1
- %xmm0 = VPADDBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPADDDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPADDDrr %xmm0, %xmm1
- %xmm0 = VPADDDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPADDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPADDQrr %xmm0, %xmm1
- %xmm0 = VPADDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPADDSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPADDSBrr %xmm0, %xmm1
- %xmm0 = VPADDSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPADDSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPADDSWrr %xmm0, %xmm1
- %xmm0 = VPADDSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDUSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPADDUSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPADDUSBrr %xmm0, %xmm1
- %xmm0 = VPADDUSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDUSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPADDUSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPADDUSWrr %xmm0, %xmm1
- %xmm0 = VPADDUSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPADDWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPADDWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPADDWrr %xmm0, %xmm1
- %xmm0 = VPADDWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPANDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPANDDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPANDrr %xmm0, %xmm1
- %xmm0 = VPANDDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPANDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPANDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPANDrr %xmm0, %xmm1
- %xmm0 = VPANDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPANDNrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPANDNDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPANDNrr %xmm0, %xmm1
- %xmm0 = VPANDNDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPANDNrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPANDNQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPANDNrr %xmm0, %xmm1
- %xmm0 = VPANDNQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPAVGBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPAVGBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPAVGBrr %xmm0, %xmm1
- %xmm0 = VPAVGBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPAVGWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPAVGWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPAVGWrr %xmm0, %xmm1
- %xmm0 = VPAVGWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMAXSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMAXSBrr %xmm0, %xmm1
- %xmm0 = VPMAXSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMAXSDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMAXSDrr %xmm0, %xmm1
- %xmm0 = VPMAXSDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMAXSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMAXSWrr %xmm0, %xmm1
- %xmm0 = VPMAXSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXUBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMAXUBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMAXUBrr %xmm0, %xmm1
- %xmm0 = VPMAXUBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXUDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMAXUDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMAXUDrr %xmm0, %xmm1
- %xmm0 = VPMAXUDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMAXUWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMAXUWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMAXUWrr %xmm0, %xmm1
- %xmm0 = VPMAXUWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMINSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMINSBrr %xmm0, %xmm1
- %xmm0 = VPMINSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMINSDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMINSDrr %xmm0, %xmm1
- %xmm0 = VPMINSDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMINSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMINSWrr %xmm0, %xmm1
- %xmm0 = VPMINSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINUBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMINUBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMINUBrr %xmm0, %xmm1
- %xmm0 = VPMINUBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINUDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMINUDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMINUDrr %xmm0, %xmm1
- %xmm0 = VPMINUDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMINUWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMINUWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMINUWrr %xmm0, %xmm1
- %xmm0 = VPMINUWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMULDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMULDQrr %xmm0, %xmm1
- %xmm0 = VPMULDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULHRSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMULHRSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMULHRSWrr %xmm0, %xmm1
- %xmm0 = VPMULHRSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULHUWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMULHUWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMULHUWrr %xmm0, %xmm1
- %xmm0 = VPMULHUWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULHWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMULHWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMULHWrr %xmm0, %xmm1
- %xmm0 = VPMULHWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULLDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMULLDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMULLDrr %xmm0, %xmm1
- %xmm0 = VPMULLDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULLWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMULLWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMULLWrr %xmm0, %xmm1
- %xmm0 = VPMULLWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMULUDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMULUDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMULUDQrr %xmm0, %xmm1
- %xmm0 = VPMULUDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPORrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPORDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPORrr %xmm0, %xmm1
- %xmm0 = VPORDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPORrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPORQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPORrr %xmm0, %xmm1
- %xmm0 = VPORQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSUBBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSUBBrr %xmm0, %xmm1
- %xmm0 = VPSUBBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSUBDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSUBDrr %xmm0, %xmm1
- %xmm0 = VPSUBDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSUBQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSUBQrr %xmm0, %xmm1
- %xmm0 = VPSUBQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSUBSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSUBSBrr %xmm0, %xmm1
- %xmm0 = VPSUBSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSUBSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSUBSWrr %xmm0, %xmm1
- %xmm0 = VPSUBSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBUSBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSUBUSBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSUBUSBrr %xmm0, %xmm1
- %xmm0 = VPSUBUSBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBUSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSUBUSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSUBUSWrr %xmm0, %xmm1
- %xmm0 = VPSUBUSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSUBWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSUBWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSUBWrr %xmm0, %xmm1
- %xmm0 = VPSUBWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VADDPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VADDPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VADDPDrr %xmm0, %xmm1
- %xmm0 = VADDPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VADDPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VADDPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VADDPSrr %xmm0, %xmm1
- %xmm0 = VADDPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VANDNPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VANDNPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VANDNPDrr %xmm0, %xmm1
- %xmm0 = VANDNPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VANDNPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VANDNPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VANDNPSrr %xmm0, %xmm1
- %xmm0 = VANDNPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VANDPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VANDPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VANDPDrr %xmm0, %xmm1
- %xmm0 = VANDPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VANDPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VANDPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VANDPSrr %xmm0, %xmm1
- %xmm0 = VANDPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VDIVPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VDIVPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VDIVPDrr %xmm0, %xmm1
- %xmm0 = VDIVPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VDIVPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VDIVPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VDIVPSrr %xmm0, %xmm1
- %xmm0 = VDIVPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPXORrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPXORDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPXORrr %xmm0, %xmm1
- %xmm0 = VPXORDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPXORrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPXORQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPXORrr %xmm0, %xmm1
- %xmm0 = VPXORQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VSUBPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VSUBPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VSUBPDrr %xmm0, %xmm1
- %xmm0 = VSUBPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VSUBPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VSUBPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VSUBPSrr %xmm0, %xmm1
- %xmm0 = VSUBPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VXORPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VXORPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VXORPDrr %xmm0, %xmm1
- %xmm0 = VXORPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VXORPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VXORPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VXORPSrr %xmm0, %xmm1
- %xmm0 = VXORPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMADDUBSWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMADDUBSWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMADDUBSWrr %xmm0, %xmm1
- %xmm0 = VPMADDUBSWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPMADDWDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPMADDWDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPMADDWDrr %xmm0, %xmm1
- %xmm0 = VPMADDWDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPACKSSDWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPACKSSDWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPACKSSDWrr %xmm0, %xmm1
- %xmm0 = VPACKSSDWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPACKSSWBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPACKSSWBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPACKSSWBrr %xmm0, %xmm1
- %xmm0 = VPACKSSWBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPACKUSDWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPACKUSDWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPACKUSDWrr %xmm0, %xmm1
- %xmm0 = VPACKUSDWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPACKUSWBrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPACKUSWBZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPACKUSWBrr %xmm0, %xmm1
- %xmm0 = VPACKUSWBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKHBWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPUNPCKHBWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPUNPCKHBWrr %xmm0, %xmm1
- %xmm0 = VPUNPCKHBWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKHDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPUNPCKHDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPUNPCKHDQrr %xmm0, %xmm1
- %xmm0 = VPUNPCKHDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKHQDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPUNPCKHQDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPUNPCKHQDQrr %xmm0, %xmm1
- %xmm0 = VPUNPCKHQDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKHWDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPUNPCKHWDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPUNPCKHWDrr %xmm0, %xmm1
- %xmm0 = VPUNPCKHWDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKLBWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPUNPCKLBWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPUNPCKLBWrr %xmm0, %xmm1
- %xmm0 = VPUNPCKLBWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKLDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPUNPCKLDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPUNPCKLDQrr %xmm0, %xmm1
- %xmm0 = VPUNPCKLDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKLQDQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPUNPCKLQDQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPUNPCKLQDQrr %xmm0, %xmm1
- %xmm0 = VPUNPCKLQDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPUNPCKLWDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPUNPCKLWDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPUNPCKLWDrr %xmm0, %xmm1
- %xmm0 = VPUNPCKLWDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VUNPCKHPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VUNPCKHPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VUNPCKHPDrr %xmm0, %xmm1
- %xmm0 = VUNPCKHPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VUNPCKHPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VUNPCKHPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VUNPCKHPSrr %xmm0, %xmm1
- %xmm0 = VUNPCKHPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VUNPCKLPDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VUNPCKLPDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VUNPCKLPDrr %xmm0, %xmm1
- %xmm0 = VUNPCKLPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VUNPCKLPSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VUNPCKLPSZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VUNPCKLPSrr %xmm0, %xmm1
- %xmm0 = VUNPCKLPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VFMADD132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD132PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD132PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD132PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD132PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD213PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD213PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD213PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD213PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD231PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD231PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD231PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD231PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADDSUB132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADDSUB132PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADDSUB132PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADDSUB132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADDSUB132PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADDSUB132PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADDSUB213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADDSUB213PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADDSUB213PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADDSUB213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADDSUB213PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADDSUB213PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADDSUB231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADDSUB231PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADDSUB231PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADDSUB231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADDSUB231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADDSUB231PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADDSUB231PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB132PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB132PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB132PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB132PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB213PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB213PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB213PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB213PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB231PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB231PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB231PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB231PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUBADD132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUBADD132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUBADD132PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUBADD132PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUBADD132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUBADD132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUBADD132PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUBADD132PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUBADD213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUBADD213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUBADD213PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUBADD213PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUBADD213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUBADD213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUBADD213PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUBADD213PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUBADD231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUBADD231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUBADD231PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUBADD231PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUBADD231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUBADD231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUBADD231PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUBADD231PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD132PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD132PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD132PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD132PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD213PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD213PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD213PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD213PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD231PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD231PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD231PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD231PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB132PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB132PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB132PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB132PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB132PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB132PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB132PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB132PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB213PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB213PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB213PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB213PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB213PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB213PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB213PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB213PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB231PDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB231PDZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB231PDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB231PDZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB231PSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB231PSZ128m %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB231PSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB231PSZ128r %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VPSLLDri %xmm0, 7
- %xmm0 = VPSLLDZ128ri %xmm0, 7
- ; CHECK: %xmm0 = VPSLLDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSLLDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSLLDrr %xmm0, 14
- %xmm0 = VPSLLDZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSLLQri %xmm0, 7
- %xmm0 = VPSLLQZ128ri %xmm0, 7
- ; CHECK: %xmm0 = VPSLLQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSLLQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSLLQrr %xmm0, 14
- %xmm0 = VPSLLQZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSLLVDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSLLVDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSLLVDrr %xmm0, 14
- %xmm0 = VPSLLVDZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSLLVQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSLLVQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSLLVQrr %xmm0, 14
- %xmm0 = VPSLLVQZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSLLWri %xmm0, 7
- %xmm0 = VPSLLWZ128ri %xmm0, 7
- ; CHECK: %xmm0 = VPSLLWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSLLWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSLLWrr %xmm0, 14
- %xmm0 = VPSLLWZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSRADri %xmm0, 7
- %xmm0 = VPSRADZ128ri %xmm0, 7
- ; CHECK: %xmm0 = VPSRADrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSRADZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSRADrr %xmm0, 14
- %xmm0 = VPSRADZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSRAVDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSRAVDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSRAVDrr %xmm0, 14
- %xmm0 = VPSRAVDZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSRAWri %xmm0, 7
- %xmm0 = VPSRAWZ128ri %xmm0, 7
- ; CHECK: %xmm0 = VPSRAWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSRAWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSRAWrr %xmm0, 14
- %xmm0 = VPSRAWZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSRLDQri %xmm0, 14
- %xmm0 = VPSRLDQZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSRLDri %xmm0, 7
- %xmm0 = VPSRLDZ128ri %xmm0, 7
- ; CHECK: %xmm0 = VPSRLDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSRLDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSRLDrr %xmm0, 14
- %xmm0 = VPSRLDZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSRLQri %xmm0, 7
- %xmm0 = VPSRLQZ128ri %xmm0, 7
- ; CHECK: %xmm0 = VPSRLQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSRLQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSRLQrr %xmm0, 14
- %xmm0 = VPSRLQZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSRLVDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSRLVDZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSRLVDrr %xmm0, 14
- %xmm0 = VPSRLVDZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSRLVQrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSRLVQZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSRLVQrr %xmm0, 14
- %xmm0 = VPSRLVQZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPSRLWri %xmm0, 7
- %xmm0 = VPSRLWZ128ri %xmm0, 7
- ; CHECK: %xmm0 = VPSRLWrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPSRLWZ128rm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPSRLWrr %xmm0, 14
- %xmm0 = VPSRLWZ128rr %xmm0, 14
- ; CHECK: %xmm0 = VPERMILPDmi %rdi, 1, %noreg, 0, %noreg, %noreg
- %xmm0 = VPERMILPDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm0 = VPERMILPDri %xmm0, 9
- %xmm0 = VPERMILPDZ128ri %xmm0, 9
- ; CHECK: %xmm0 = VPERMILPDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VPERMILPDZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VPERMILPDrr %xmm0, %xmm1
- %xmm0 = VPERMILPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPERMILPSmi %rdi, 1, %noreg, 0, %noreg, %noreg
- %xmm0 = VPERMILPSZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm0 = VPERMILPSri %xmm0, 9
- %xmm0 = VPERMILPSZ128ri %xmm0, 9
- ; CHECK: %xmm0 = VPERMILPSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VPERMILPSZ128rm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VPERMILPSrr %xmm0, %xmm1
- %xmm0 = VPERMILPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VCVTPH2PSrm %rdi, %xmm0, 1, %noreg, 0
- %xmm0 = VCVTPH2PSZ128rm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %xmm0 = VCVTPH2PSrr %xmm0
- %xmm0 = VCVTPH2PSZ128rr %xmm0
- ; CHECK: %xmm0 = VCVTDQ2PDrm %rdi, %xmm0, 1, %noreg, 0
- %xmm0 = VCVTDQ2PDZ128rm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %xmm0 = VCVTDQ2PDrr %xmm0
- %xmm0 = VCVTDQ2PDZ128rr %xmm0
- ; CHECK: %xmm0 = VCVTDQ2PSrm %rdi, %xmm0, 1, %noreg, 0
- %xmm0 = VCVTDQ2PSZ128rm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %xmm0 = VCVTDQ2PSrr %xmm0
- %xmm0 = VCVTDQ2PSZ128rr %xmm0
- ; CHECK: %xmm0 = VCVTPD2DQrm %rdi, %xmm0, 1, %noreg, 0
- %xmm0 = VCVTPD2DQZ128rm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %xmm0 = VCVTPD2DQrr %xmm0
- %xmm0 = VCVTPD2DQZ128rr %xmm0
- ; CHECK: %xmm0 = VCVTPD2PSrm %rdi, %xmm0, 1, %noreg, 0
- %xmm0 = VCVTPD2PSZ128rm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %xmm0 = VCVTPD2PSrr %xmm0
- %xmm0 = VCVTPD2PSZ128rr %xmm0
- ; CHECK: %xmm0 = VCVTPS2DQrm %rdi, %xmm0, 1, %noreg, 0
- %xmm0 = VCVTPS2DQZ128rm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %xmm0 = VCVTPS2DQrr %xmm0
- %xmm0 = VCVTPS2DQZ128rr %xmm0
- ; CHECK: %xmm0 = VCVTPS2PDrm %rdi, %xmm0, 1, %noreg, 0
- %xmm0 = VCVTPS2PDZ128rm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %xmm0 = VCVTPS2PDrr %xmm0
- %xmm0 = VCVTPS2PDZ128rr %xmm0
- ; CHECK: %xmm0 = VCVTTPD2DQrm %rdi, %xmm0, 1, %noreg, 0
- %xmm0 = VCVTTPD2DQZ128rm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %xmm0 = VCVTTPD2DQrr %xmm0
- %xmm0 = VCVTTPD2DQZ128rr %xmm0
- ; CHECK: %xmm0 = VCVTTPS2DQrm %rdi, %xmm0, 1, %noreg, 0
- %xmm0 = VCVTTPS2DQZ128rm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %xmm0 = VCVTTPS2DQrr %xmm0
- %xmm0 = VCVTTPS2DQZ128rr %xmm0
- ; CHECK: %xmm0 = VSQRTPDm %rdi, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VSQRTPDZ128m %rdi, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VSQRTPDr %xmm0
- %xmm0 = VSQRTPDZ128r %xmm0
- ; CHECK: %xmm0 = VSQRTPSm %rdi, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VSQRTPSZ128m %rdi, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VSQRTPSr %xmm0
- %xmm0 = VSQRTPSZ128r %xmm0
- ; CHECK: %xmm0 = VMOVDDUPrm %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VMOVDDUPZ128rm %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VMOVDDUPrr %xmm0
- %xmm0 = VMOVDDUPZ128rr %xmm0
- ; CHECK: %xmm0 = VMOVSHDUPrm %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VMOVSHDUPZ128rm %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VMOVSHDUPrr %xmm0
- %xmm0 = VMOVSHDUPZ128rr %xmm0
- ; CHECK: %xmm0 = VMOVSLDUPrm %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VMOVSLDUPZ128rm %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VMOVSLDUPrr %xmm0
- %xmm0 = VMOVSLDUPZ128rr %xmm0
- ; CHECK: %xmm0 = VPSHUFBrm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VPSHUFBZ128rm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VPSHUFBrr %xmm0, %xmm1
- %xmm0 = VPSHUFBZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VPSHUFDmi %rdi, 1, %noreg, 0, %noreg, %noreg
- %xmm0 = VPSHUFDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm0 = VPSHUFDri %xmm0, -24
- %xmm0 = VPSHUFDZ128ri %xmm0, -24
- ; CHECK: %xmm0 = VPSHUFHWmi %rdi, 1, %noreg, 0, %noreg, %noreg
- %xmm0 = VPSHUFHWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm0 = VPSHUFHWri %xmm0, -24
- %xmm0 = VPSHUFHWZ128ri %xmm0, -24
- ; CHECK: %xmm0 = VPSHUFLWmi %rdi, 1, %noreg, 0, %noreg, %noreg
- %xmm0 = VPSHUFLWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm0 = VPSHUFLWri %xmm0, -24
- %xmm0 = VPSHUFLWZ128ri %xmm0, -24
- ; CHECK: %xmm0 = VPSLLDQri %xmm0, %xmm1
- %xmm0 = VPSLLDQZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VSHUFPDrmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VSHUFPDZ128rmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VSHUFPDrri %xmm0, %noreg, %noreg
- %xmm0 = VSHUFPDZ128rri %xmm0, %noreg, %noreg
- ; CHECK: %xmm0 = VSHUFPSrmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VSHUFPSZ128rmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VSHUFPSrri %xmm0, %noreg, %noreg
- %xmm0 = VSHUFPSZ128rri %xmm0, %noreg, %noreg
- ; CHECK: %xmm0 = VPSADBWrm %xmm0, 1, %noreg, %rax, %noreg, %noreg
- %xmm0 = VPSADBWZ128rm %xmm0, 1, %noreg, %rax, %noreg, %noreg
- ; CHECK: %xmm0 = VPSADBWrr %xmm0, %xmm1
- %xmm0 = VPSADBWZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VBROADCASTSSrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VBROADCASTSSZ128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VBROADCASTSSrr %xmm0
- %xmm0 = VBROADCASTSSZ128r %xmm0
- ; CHECK: %xmm0 = VPBROADCASTBrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VPBROADCASTBZ128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VPBROADCASTBrr %xmm0
- %xmm0 = VPBROADCASTBZ128r %xmm0
- ; CHECK: %xmm0 = VPBROADCASTDrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VPBROADCASTDZ128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VPBROADCASTDrr %xmm0
- %xmm0 = VPBROADCASTDZ128r %xmm0
- ; CHECK: %xmm0 = VPBROADCASTQrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VPBROADCASTQZ128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VPBROADCASTQrr %xmm0
- %xmm0 = VPBROADCASTQZ128r %xmm0
- ; CHECK: %xmm0 = VPBROADCASTWrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VPBROADCASTWZ128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VPBROADCASTWrr %xmm0
- %xmm0 = VPBROADCASTWZ128r %xmm0
- ; CHECK: %xmm0 = VPBROADCASTQrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VBROADCASTI32X2Z128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VPBROADCASTQrr %xmm0
- %xmm0 = VBROADCASTI32X2Z128r %xmm0
- ; CHECK: %xmm0 = VCVTPS2PHrr %xmm0, 2
- %xmm0 = VCVTPS2PHZ128rr %xmm0, 2
- ; CHECK: VCVTPS2PHmr %rdi, %xmm0, 1, %noreg, 0, %noreg, %noreg
- VCVTPS2PHZ128mr %rdi, %xmm0, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm0 = VPABSBrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPABSBZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPABSBrr %xmm0
- %xmm0 = VPABSBZ128rr %xmm0
- ; CHECK: %xmm0 = VPABSDrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPABSDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPABSDrr %xmm0
- %xmm0 = VPABSDZ128rr %xmm0
- ; CHECK: %xmm0 = VPABSWrm %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VPABSWZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VPABSWrr %xmm0
- %xmm0 = VPABSWZ128rr %xmm0
- ; CHECK: %xmm0 = VPALIGNRrmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VPALIGNRZ128rmi %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VPALIGNRrri %xmm0, %xmm1, 15
- %xmm0 = VPALIGNRZ128rri %xmm0, %xmm1, 15
+ ; CHECK: VMOVAPDmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVAPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: $xmm0 = VMOVAPDrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMOVAPDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMOVAPDrr $xmm0
+ $xmm0 = VMOVAPDZ128rr $xmm0
+ ; CHECK: VMOVAPSmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVAPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: $xmm0 = VMOVAPSrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMOVAPSZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMOVAPSrr $xmm0
+ $xmm0 = VMOVAPSZ128rr $xmm0
+ ; CHECK: VMOVDQAmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVDQA32Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: $xmm0 = VMOVDQArm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMOVDQA32Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMOVDQArr $xmm0
+ $xmm0 = VMOVDQA32Z128rr $xmm0
+ ; CHECK: VMOVDQAmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVDQA64Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: $xmm0 = VMOVDQArm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMOVDQA64Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMOVDQArr $xmm0
+ $xmm0 = VMOVDQA64Z128rr $xmm0
+ ; CHECK: VMOVDQUmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVDQU16Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: $xmm0 = VMOVDQUrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMOVDQU16Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMOVDQUrr $xmm0
+ $xmm0 = VMOVDQU16Z128rr $xmm0
+ ; CHECK: VMOVDQUmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVDQU32Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: $xmm0 = VMOVDQUrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMOVDQU32Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMOVDQUrr $xmm0
+ $xmm0 = VMOVDQU32Z128rr $xmm0
+ ; CHECK: VMOVDQUmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVDQU64Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: $xmm0 = VMOVDQUrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMOVDQU64Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMOVDQUrr $xmm0
+ $xmm0 = VMOVDQU64Z128rr $xmm0
+ ; CHECK: VMOVDQUmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVDQU8Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: $xmm0 = VMOVDQUrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMOVDQU8Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMOVDQUrr $xmm0
+ $xmm0 = VMOVDQU8Z128rr $xmm0
+ ; CHECK: $xmm0 = VMOVDQUrr_REV $xmm0
+ $xmm0 = VMOVDQU8Z128rr_REV $xmm0
+ ; CHECK: $xmm0 = VMOVNTDQArm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMOVNTDQAZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: VMOVUPDmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVUPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: $xmm0 = VMOVUPDrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMOVUPDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMOVUPDrr $xmm0
+ $xmm0 = VMOVUPDZ128rr $xmm0
+ ; CHECK: $xmm0 = VMOVUPDrr_REV $xmm0
+ $xmm0 = VMOVUPDZ128rr_REV $xmm0
+ ; CHECK: VMOVUPSmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVUPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: $xmm0 = VMOVUPSrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMOVUPSZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMOVUPSrr $xmm0
+ $xmm0 = VMOVUPSZ128rr $xmm0
+ ; CHECK: $xmm0 = VMOVUPSrr_REV $xmm0
+ $xmm0 = VMOVUPSZ128rr_REV $xmm0
+ ; CHECK: VMOVNTDQmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVNTDQZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: VMOVNTPDmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVNTPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: VMOVNTPSmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVNTPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: $xmm0 = VMOVAPDrr_REV $xmm0
+ $xmm0 = VMOVAPDZ128rr_REV $xmm0
+ ; CHECK: $xmm0 = VMOVAPSrr_REV $xmm0
+ $xmm0 = VMOVAPSZ128rr_REV $xmm0
+ ; CHECK: $xmm0 = VMOVDQArr_REV $xmm0
+ $xmm0 = VMOVDQA32Z128rr_REV $xmm0
+ ; CHECK: $xmm0 = VMOVDQArr_REV $xmm0
+ $xmm0 = VMOVDQA64Z128rr_REV $xmm0
+ ; CHECK: $xmm0 = VMOVDQUrr_REV $xmm0
+ $xmm0 = VMOVDQU16Z128rr_REV $xmm0
+ ; CHECK: $xmm0 = VMOVDQUrr_REV $xmm0
+ $xmm0 = VMOVDQU32Z128rr_REV $xmm0
+ ; CHECK: $xmm0 = VMOVDQUrr_REV $xmm0
+ $xmm0 = VMOVDQU64Z128rr_REV $xmm0
+ ; CHECK: $xmm0 = VPMOVSXBDrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMOVSXBDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMOVSXBDrr $xmm0
+ $xmm0 = VPMOVSXBDZ128rr $xmm0
+ ; CHECK: $xmm0 = VPMOVSXBQrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMOVSXBQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMOVSXBQrr $xmm0
+ $xmm0 = VPMOVSXBQZ128rr $xmm0
+ ; CHECK: $xmm0 = VPMOVSXBWrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMOVSXBWZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMOVSXBWrr $xmm0
+ $xmm0 = VPMOVSXBWZ128rr $xmm0
+ ; CHECK: $xmm0 = VPMOVSXDQrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMOVSXDQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMOVSXDQrr $xmm0
+ $xmm0 = VPMOVSXDQZ128rr $xmm0
+ ; CHECK: $xmm0 = VPMOVSXWDrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMOVSXWDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMOVSXWDrr $xmm0
+ $xmm0 = VPMOVSXWDZ128rr $xmm0
+ ; CHECK: $xmm0 = VPMOVSXWQrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMOVSXWQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMOVSXWQrr $xmm0
+ $xmm0 = VPMOVSXWQZ128rr $xmm0
+ ; CHECK: $xmm0 = VPMOVZXBDrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMOVZXBDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMOVZXBDrr $xmm0
+ $xmm0 = VPMOVZXBDZ128rr $xmm0
+ ; CHECK: $xmm0 = VPMOVZXBQrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMOVZXBQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMOVZXBQrr $xmm0
+ $xmm0 = VPMOVZXBQZ128rr $xmm0
+ ; CHECK: $xmm0 = VPMOVZXBWrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMOVZXBWZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMOVZXBWrr $xmm0
+ $xmm0 = VPMOVZXBWZ128rr $xmm0
+ ; CHECK: $xmm0 = VPMOVZXDQrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMOVZXDQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMOVZXDQrr $xmm0
+ $xmm0 = VPMOVZXDQZ128rr $xmm0
+ ; CHECK: $xmm0 = VPMOVZXWDrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMOVZXWDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMOVZXWDrr $xmm0
+ $xmm0 = VPMOVZXWDZ128rr $xmm0
+ ; CHECK: $xmm0 = VPMOVZXWQrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMOVZXWQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMOVZXWQrr $xmm0
+ $xmm0 = VPMOVZXWQZ128rr $xmm0
+ ; CHECK: VMOVHPDmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVHPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: $xmm0 = VMOVHPDrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VMOVHPDZ128rm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: VMOVHPSmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVHPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: $xmm0 = VMOVHPSrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VMOVHPSZ128rm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: VMOVLPDmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVLPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: $xmm0 = VMOVLPDrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VMOVLPDZ128rm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: VMOVLPSmr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ VMOVLPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm0
+ ; CHECK: $xmm0 = VMOVLPSrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VMOVLPSZ128rm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VMAXCPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMAXCPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMAXCPDrr $xmm0, $xmm1
+ $xmm0 = VMAXCPDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMAXCPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMAXCPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMAXCPSrr $xmm0, $xmm1
+ $xmm0 = VMAXCPSZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMAXCPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMAXPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMAXCPDrr $xmm0, $xmm1
+ $xmm0 = VMAXPDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMAXCPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMAXPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMAXCPSrr $xmm0, $xmm1
+ $xmm0 = VMAXPSZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMINCPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMINCPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMINCPDrr $xmm0, $xmm1
+ $xmm0 = VMINCPDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMINCPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMINCPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMINCPSrr $xmm0, $xmm1
+ $xmm0 = VMINCPSZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMINCPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMINPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMINCPDrr $xmm0, $xmm1
+ $xmm0 = VMINPDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMINCPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMINPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMINCPSrr $xmm0, $xmm1
+ $xmm0 = VMINPSZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMULPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMULPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMULPDrr $xmm0, $xmm1
+ $xmm0 = VMULPDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMULPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMULPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMULPSrr $xmm0, $xmm1
+ $xmm0 = VMULPSZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VORPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VORPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VORPDrr $xmm0, $xmm1
+ $xmm0 = VORPDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VORPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VORPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VORPSrr $xmm0, $xmm1
+ $xmm0 = VORPSZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPADDBrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPADDBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPADDBrr $xmm0, $xmm1
+ $xmm0 = VPADDBZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPADDDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPADDDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPADDDrr $xmm0, $xmm1
+ $xmm0 = VPADDDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPADDQrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPADDQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPADDQrr $xmm0, $xmm1
+ $xmm0 = VPADDQZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPADDSBrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPADDSBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPADDSBrr $xmm0, $xmm1
+ $xmm0 = VPADDSBZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPADDSWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPADDSWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPADDSWrr $xmm0, $xmm1
+ $xmm0 = VPADDSWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPADDUSBrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPADDUSBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPADDUSBrr $xmm0, $xmm1
+ $xmm0 = VPADDUSBZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPADDUSWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPADDUSWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPADDUSWrr $xmm0, $xmm1
+ $xmm0 = VPADDUSWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPADDWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPADDWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPADDWrr $xmm0, $xmm1
+ $xmm0 = VPADDWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPANDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPANDDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPANDrr $xmm0, $xmm1
+ $xmm0 = VPANDDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPANDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPANDQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPANDrr $xmm0, $xmm1
+ $xmm0 = VPANDQZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPANDNrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPANDNDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPANDNrr $xmm0, $xmm1
+ $xmm0 = VPANDNDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPANDNrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPANDNQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPANDNrr $xmm0, $xmm1
+ $xmm0 = VPANDNQZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPAVGBrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPAVGBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPAVGBrr $xmm0, $xmm1
+ $xmm0 = VPAVGBZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPAVGWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPAVGWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPAVGWrr $xmm0, $xmm1
+ $xmm0 = VPAVGWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMAXSBrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMAXSBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMAXSBrr $xmm0, $xmm1
+ $xmm0 = VPMAXSBZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMAXSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMAXSDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMAXSDrr $xmm0, $xmm1
+ $xmm0 = VPMAXSDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMAXSWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMAXSWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMAXSWrr $xmm0, $xmm1
+ $xmm0 = VPMAXSWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMAXUBrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMAXUBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMAXUBrr $xmm0, $xmm1
+ $xmm0 = VPMAXUBZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMAXUDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMAXUDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMAXUDrr $xmm0, $xmm1
+ $xmm0 = VPMAXUDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMAXUWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMAXUWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMAXUWrr $xmm0, $xmm1
+ $xmm0 = VPMAXUWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMINSBrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMINSBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMINSBrr $xmm0, $xmm1
+ $xmm0 = VPMINSBZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMINSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMINSDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMINSDrr $xmm0, $xmm1
+ $xmm0 = VPMINSDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMINSWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMINSWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMINSWrr $xmm0, $xmm1
+ $xmm0 = VPMINSWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMINUBrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMINUBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMINUBrr $xmm0, $xmm1
+ $xmm0 = VPMINUBZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMINUDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMINUDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMINUDrr $xmm0, $xmm1
+ $xmm0 = VPMINUDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMINUWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMINUWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMINUWrr $xmm0, $xmm1
+ $xmm0 = VPMINUWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMULDQrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMULDQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMULDQrr $xmm0, $xmm1
+ $xmm0 = VPMULDQZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMULHRSWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMULHRSWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMULHRSWrr $xmm0, $xmm1
+ $xmm0 = VPMULHRSWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMULHUWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMULHUWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMULHUWrr $xmm0, $xmm1
+ $xmm0 = VPMULHUWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMULHWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMULHWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMULHWrr $xmm0, $xmm1
+ $xmm0 = VPMULHWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMULLDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMULLDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMULLDrr $xmm0, $xmm1
+ $xmm0 = VPMULLDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMULLWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMULLWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMULLWrr $xmm0, $xmm1
+ $xmm0 = VPMULLWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMULUDQrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMULUDQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMULUDQrr $xmm0, $xmm1
+ $xmm0 = VPMULUDQZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPORrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPORDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPORrr $xmm0, $xmm1
+ $xmm0 = VPORDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPORrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPORQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPORrr $xmm0, $xmm1
+ $xmm0 = VPORQZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPSUBBrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSUBBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSUBBrr $xmm0, $xmm1
+ $xmm0 = VPSUBBZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPSUBDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSUBDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSUBDrr $xmm0, $xmm1
+ $xmm0 = VPSUBDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPSUBQrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSUBQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSUBQrr $xmm0, $xmm1
+ $xmm0 = VPSUBQZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPSUBSBrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSUBSBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSUBSBrr $xmm0, $xmm1
+ $xmm0 = VPSUBSBZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPSUBSWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSUBSWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSUBSWrr $xmm0, $xmm1
+ $xmm0 = VPSUBSWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPSUBUSBrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSUBUSBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSUBUSBrr $xmm0, $xmm1
+ $xmm0 = VPSUBUSBZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPSUBUSWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSUBUSWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSUBUSWrr $xmm0, $xmm1
+ $xmm0 = VPSUBUSWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPSUBWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSUBWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSUBWrr $xmm0, $xmm1
+ $xmm0 = VPSUBWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VADDPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VADDPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VADDPDrr $xmm0, $xmm1
+ $xmm0 = VADDPDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VADDPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VADDPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VADDPSrr $xmm0, $xmm1
+ $xmm0 = VADDPSZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VANDNPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VANDNPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VANDNPDrr $xmm0, $xmm1
+ $xmm0 = VANDNPDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VANDNPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VANDNPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VANDNPSrr $xmm0, $xmm1
+ $xmm0 = VANDNPSZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VANDPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VANDPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VANDPDrr $xmm0, $xmm1
+ $xmm0 = VANDPDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VANDPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VANDPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VANDPSrr $xmm0, $xmm1
+ $xmm0 = VANDPSZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VDIVPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VDIVPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VDIVPDrr $xmm0, $xmm1
+ $xmm0 = VDIVPDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VDIVPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VDIVPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VDIVPSrr $xmm0, $xmm1
+ $xmm0 = VDIVPSZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPXORrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPXORDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPXORrr $xmm0, $xmm1
+ $xmm0 = VPXORDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPXORrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPXORQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPXORrr $xmm0, $xmm1
+ $xmm0 = VPXORQZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VSUBPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VSUBPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VSUBPDrr $xmm0, $xmm1
+ $xmm0 = VSUBPDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VSUBPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VSUBPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VSUBPSrr $xmm0, $xmm1
+ $xmm0 = VSUBPSZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VXORPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VXORPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VXORPDrr $xmm0, $xmm1
+ $xmm0 = VXORPDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VXORPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VXORPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VXORPSrr $xmm0, $xmm1
+ $xmm0 = VXORPSZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMADDUBSWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMADDUBSWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMADDUBSWrr $xmm0, $xmm1
+ $xmm0 = VPMADDUBSWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPMADDWDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPMADDWDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPMADDWDrr $xmm0, $xmm1
+ $xmm0 = VPMADDWDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPACKSSDWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPACKSSDWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPACKSSDWrr $xmm0, $xmm1
+ $xmm0 = VPACKSSDWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPACKSSWBrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPACKSSWBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPACKSSWBrr $xmm0, $xmm1
+ $xmm0 = VPACKSSWBZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPACKUSDWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPACKUSDWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPACKUSDWrr $xmm0, $xmm1
+ $xmm0 = VPACKUSDWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPACKUSWBrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPACKUSWBZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPACKUSWBrr $xmm0, $xmm1
+ $xmm0 = VPACKUSWBZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPUNPCKHBWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPUNPCKHBWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPUNPCKHBWrr $xmm0, $xmm1
+ $xmm0 = VPUNPCKHBWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPUNPCKHDQrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPUNPCKHDQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPUNPCKHDQrr $xmm0, $xmm1
+ $xmm0 = VPUNPCKHDQZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPUNPCKHQDQrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPUNPCKHQDQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPUNPCKHQDQrr $xmm0, $xmm1
+ $xmm0 = VPUNPCKHQDQZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPUNPCKHWDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPUNPCKHWDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPUNPCKHWDrr $xmm0, $xmm1
+ $xmm0 = VPUNPCKHWDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPUNPCKLBWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPUNPCKLBWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPUNPCKLBWrr $xmm0, $xmm1
+ $xmm0 = VPUNPCKLBWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPUNPCKLDQrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPUNPCKLDQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPUNPCKLDQrr $xmm0, $xmm1
+ $xmm0 = VPUNPCKLDQZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPUNPCKLQDQrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPUNPCKLQDQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPUNPCKLQDQrr $xmm0, $xmm1
+ $xmm0 = VPUNPCKLQDQZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPUNPCKLWDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPUNPCKLWDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPUNPCKLWDrr $xmm0, $xmm1
+ $xmm0 = VPUNPCKLWDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VUNPCKHPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VUNPCKHPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VUNPCKHPDrr $xmm0, $xmm1
+ $xmm0 = VUNPCKHPDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VUNPCKHPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VUNPCKHPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VUNPCKHPSrr $xmm0, $xmm1
+ $xmm0 = VUNPCKHPSZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VUNPCKLPDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VUNPCKLPDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VUNPCKLPDrr $xmm0, $xmm1
+ $xmm0 = VUNPCKLPDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VUNPCKLPSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VUNPCKLPSZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VUNPCKLPSrr $xmm0, $xmm1
+ $xmm0 = VUNPCKLPSZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VFMADD132PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD132PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD132PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD132PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD132PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD132PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD132PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD132PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD213PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD213PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD213PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD213PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD213PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD213PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD213PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD213PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD231PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD231PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD231PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD231PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD231PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD231PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD231PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD231PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADDSUB132PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADDSUB132PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADDSUB132PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADDSUB132PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADDSUB132PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADDSUB132PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADDSUB132PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADDSUB132PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADDSUB213PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADDSUB213PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADDSUB213PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADDSUB213PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADDSUB213PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADDSUB213PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADDSUB213PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADDSUB213PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADDSUB231PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADDSUB231PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADDSUB231PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADDSUB231PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADDSUB231PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADDSUB231PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADDSUB231PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADDSUB231PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB132PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB132PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB132PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB132PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB132PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB132PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB132PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB132PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB213PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB213PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB213PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB213PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB213PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB213PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB213PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB213PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB231PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB231PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB231PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB231PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB231PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB231PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB231PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB231PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUBADD132PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUBADD132PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUBADD132PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUBADD132PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUBADD132PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUBADD132PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUBADD132PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUBADD132PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUBADD213PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUBADD213PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUBADD213PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUBADD213PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUBADD213PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUBADD213PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUBADD213PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUBADD213PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUBADD231PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUBADD231PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUBADD231PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUBADD231PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUBADD231PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUBADD231PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUBADD231PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUBADD231PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD132PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD132PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD132PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD132PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD132PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD132PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD132PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD132PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD213PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD213PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD213PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD213PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD213PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD213PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD213PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD213PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD231PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD231PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD231PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD231PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD231PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD231PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD231PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD231PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB132PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB132PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB132PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB132PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB132PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB132PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB132PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB132PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB213PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB213PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB213PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB213PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB213PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB213PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB213PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB213PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB231PDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB231PDZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB231PDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB231PDZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB231PSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB231PSZ128m $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB231PSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB231PSZ128r $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VPSLLDri $xmm0, 7
+ $xmm0 = VPSLLDZ128ri $xmm0, 7
+ ; CHECK: $xmm0 = VPSLLDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSLLDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSLLDrr $xmm0, 14
+ $xmm0 = VPSLLDZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSLLQri $xmm0, 7
+ $xmm0 = VPSLLQZ128ri $xmm0, 7
+ ; CHECK: $xmm0 = VPSLLQrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSLLQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSLLQrr $xmm0, 14
+ $xmm0 = VPSLLQZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSLLVDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSLLVDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSLLVDrr $xmm0, 14
+ $xmm0 = VPSLLVDZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSLLVQrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSLLVQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSLLVQrr $xmm0, 14
+ $xmm0 = VPSLLVQZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSLLWri $xmm0, 7
+ $xmm0 = VPSLLWZ128ri $xmm0, 7
+ ; CHECK: $xmm0 = VPSLLWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSLLWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSLLWrr $xmm0, 14
+ $xmm0 = VPSLLWZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRADri $xmm0, 7
+ $xmm0 = VPSRADZ128ri $xmm0, 7
+ ; CHECK: $xmm0 = VPSRADrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSRADZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSRADrr $xmm0, 14
+ $xmm0 = VPSRADZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRAVDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSRAVDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSRAVDrr $xmm0, 14
+ $xmm0 = VPSRAVDZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRAWri $xmm0, 7
+ $xmm0 = VPSRAWZ128ri $xmm0, 7
+ ; CHECK: $xmm0 = VPSRAWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSRAWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSRAWrr $xmm0, 14
+ $xmm0 = VPSRAWZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRLDQri $xmm0, 14
+ $xmm0 = VPSRLDQZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRLDri $xmm0, 7
+ $xmm0 = VPSRLDZ128ri $xmm0, 7
+ ; CHECK: $xmm0 = VPSRLDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSRLDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSRLDrr $xmm0, 14
+ $xmm0 = VPSRLDZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRLQri $xmm0, 7
+ $xmm0 = VPSRLQZ128ri $xmm0, 7
+ ; CHECK: $xmm0 = VPSRLQrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSRLQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSRLQrr $xmm0, 14
+ $xmm0 = VPSRLQZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRLVDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSRLVDZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSRLVDrr $xmm0, 14
+ $xmm0 = VPSRLVDZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRLVQrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSRLVQZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSRLVQrr $xmm0, 14
+ $xmm0 = VPSRLVQZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPSRLWri $xmm0, 7
+ $xmm0 = VPSRLWZ128ri $xmm0, 7
+ ; CHECK: $xmm0 = VPSRLWrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPSRLWZ128rm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPSRLWrr $xmm0, 14
+ $xmm0 = VPSRLWZ128rr $xmm0, 14
+ ; CHECK: $xmm0 = VPERMILPDmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $xmm0 = VPERMILPDZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm0 = VPERMILPDri $xmm0, 9
+ $xmm0 = VPERMILPDZ128ri $xmm0, 9
+ ; CHECK: $xmm0 = VPERMILPDrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VPERMILPDZ128rm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VPERMILPDrr $xmm0, $xmm1
+ $xmm0 = VPERMILPDZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPERMILPSmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $xmm0 = VPERMILPSZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm0 = VPERMILPSri $xmm0, 9
+ $xmm0 = VPERMILPSZ128ri $xmm0, 9
+ ; CHECK: $xmm0 = VPERMILPSrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VPERMILPSZ128rm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VPERMILPSrr $xmm0, $xmm1
+ $xmm0 = VPERMILPSZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VCVTPH2PSrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTPH2PSZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTPH2PSrr $xmm0
+ $xmm0 = VCVTPH2PSZ128rr $xmm0
+ ; CHECK: $xmm0 = VCVTDQ2PDrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTDQ2PDZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTDQ2PDrr $xmm0
+ $xmm0 = VCVTDQ2PDZ128rr $xmm0
+ ; CHECK: $xmm0 = VCVTDQ2PSrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTDQ2PSZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTDQ2PSrr $xmm0
+ $xmm0 = VCVTDQ2PSZ128rr $xmm0
+ ; CHECK: $xmm0 = VCVTPD2DQrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTPD2DQZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTPD2DQrr $xmm0
+ $xmm0 = VCVTPD2DQZ128rr $xmm0
+ ; CHECK: $xmm0 = VCVTPD2PSrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTPD2PSZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTPD2PSrr $xmm0
+ $xmm0 = VCVTPD2PSZ128rr $xmm0
+ ; CHECK: $xmm0 = VCVTPS2DQrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTPS2DQZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTPS2DQrr $xmm0
+ $xmm0 = VCVTPS2DQZ128rr $xmm0
+ ; CHECK: $xmm0 = VCVTPS2PDrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTPS2PDZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTPS2PDrr $xmm0
+ $xmm0 = VCVTPS2PDZ128rr $xmm0
+ ; CHECK: $xmm0 = VCVTTPD2DQrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTTPD2DQZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTTPD2DQrr $xmm0
+ $xmm0 = VCVTTPD2DQZ128rr $xmm0
+ ; CHECK: $xmm0 = VCVTTPS2DQrm $rdi, $xmm0, 1, $noreg, 0
+ $xmm0 = VCVTTPS2DQZ128rm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTTPS2DQrr $xmm0
+ $xmm0 = VCVTTPS2DQZ128rr $xmm0
+ ; CHECK: $xmm0 = VSQRTPDm $rdi, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VSQRTPDZ128m $rdi, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VSQRTPDr $xmm0
+ $xmm0 = VSQRTPDZ128r $xmm0
+ ; CHECK: $xmm0 = VSQRTPSm $rdi, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VSQRTPSZ128m $rdi, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VSQRTPSr $xmm0
+ $xmm0 = VSQRTPSZ128r $xmm0
+ ; CHECK: $xmm0 = VMOVDDUPrm $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VMOVDDUPZ128rm $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VMOVDDUPrr $xmm0
+ $xmm0 = VMOVDDUPZ128rr $xmm0
+ ; CHECK: $xmm0 = VMOVSHDUPrm $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VMOVSHDUPZ128rm $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VMOVSHDUPrr $xmm0
+ $xmm0 = VMOVSHDUPZ128rr $xmm0
+ ; CHECK: $xmm0 = VMOVSLDUPrm $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VMOVSLDUPZ128rm $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VMOVSLDUPrr $xmm0
+ $xmm0 = VMOVSLDUPZ128rr $xmm0
+ ; CHECK: $xmm0 = VPSHUFBrm $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VPSHUFBZ128rm $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VPSHUFBrr $xmm0, $xmm1
+ $xmm0 = VPSHUFBZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VPSHUFDmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $xmm0 = VPSHUFDZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm0 = VPSHUFDri $xmm0, -24
+ $xmm0 = VPSHUFDZ128ri $xmm0, -24
+ ; CHECK: $xmm0 = VPSHUFHWmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $xmm0 = VPSHUFHWZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm0 = VPSHUFHWri $xmm0, -24
+ $xmm0 = VPSHUFHWZ128ri $xmm0, -24
+ ; CHECK: $xmm0 = VPSHUFLWmi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $xmm0 = VPSHUFLWZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm0 = VPSHUFLWri $xmm0, -24
+ $xmm0 = VPSHUFLWZ128ri $xmm0, -24
+ ; CHECK: $xmm0 = VPSLLDQri $xmm0, $xmm1
+ $xmm0 = VPSLLDQZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VSHUFPDrmi $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VSHUFPDZ128rmi $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VSHUFPDrri $xmm0, $noreg, $noreg
+ $xmm0 = VSHUFPDZ128rri $xmm0, $noreg, $noreg
+ ; CHECK: $xmm0 = VSHUFPSrmi $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VSHUFPSZ128rmi $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VSHUFPSrri $xmm0, $noreg, $noreg
+ $xmm0 = VSHUFPSZ128rri $xmm0, $noreg, $noreg
+ ; CHECK: $xmm0 = VPSADBWrm $xmm0, 1, $noreg, $rax, $noreg, $noreg
+ $xmm0 = VPSADBWZ128rm $xmm0, 1, $noreg, $rax, $noreg, $noreg
+ ; CHECK: $xmm0 = VPSADBWrr $xmm0, $xmm1
+ $xmm0 = VPSADBWZ128rr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VBROADCASTSSrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VBROADCASTSSZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VBROADCASTSSrr $xmm0
+ $xmm0 = VBROADCASTSSZ128r $xmm0
+ ; CHECK: $xmm0 = VPBROADCASTBrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VPBROADCASTBZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VPBROADCASTBrr $xmm0
+ $xmm0 = VPBROADCASTBZ128r $xmm0
+ ; CHECK: $xmm0 = VPBROADCASTDrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VPBROADCASTDZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VPBROADCASTDrr $xmm0
+ $xmm0 = VPBROADCASTDZ128r $xmm0
+ ; CHECK: $xmm0 = VPBROADCASTQrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VPBROADCASTQZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VPBROADCASTQrr $xmm0
+ $xmm0 = VPBROADCASTQZ128r $xmm0
+ ; CHECK: $xmm0 = VPBROADCASTWrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VPBROADCASTWZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VPBROADCASTWrr $xmm0
+ $xmm0 = VPBROADCASTWZ128r $xmm0
+ ; CHECK: $xmm0 = VPBROADCASTQrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VBROADCASTI32X2Z128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VPBROADCASTQrr $xmm0
+ $xmm0 = VBROADCASTI32X2Z128r $xmm0
+ ; CHECK: $xmm0 = VCVTPS2PHrr $xmm0, 2
+ $xmm0 = VCVTPS2PHZ128rr $xmm0, 2
+ ; CHECK: VCVTPS2PHmr $rdi, $xmm0, 1, $noreg, 0, $noreg, $noreg
+ VCVTPS2PHZ128mr $rdi, $xmm0, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm0 = VPABSBrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPABSBZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPABSBrr $xmm0
+ $xmm0 = VPABSBZ128rr $xmm0
+ ; CHECK: $xmm0 = VPABSDrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPABSDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPABSDrr $xmm0
+ $xmm0 = VPABSDZ128rr $xmm0
+ ; CHECK: $xmm0 = VPABSWrm $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VPABSWZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VPABSWrr $xmm0
+ $xmm0 = VPABSWZ128rr $xmm0
+ ; CHECK: $xmm0 = VPALIGNRrmi $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VPALIGNRZ128rmi $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VPALIGNRrri $xmm0, $xmm1, 15
+ $xmm0 = VPALIGNRZ128rri $xmm0, $xmm1, 15
- RET 0, %zmm0, %zmm1
+ RET 0, $zmm0, $zmm1
...
---
# CHECK-LABEL: name: evex_scalar_to_vex_test
@@ -1770,554 +1770,554 @@
body: |
bb.0:
- ; CHECK: %xmm0 = VADDSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VADDSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VADDSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VADDSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VADDSDrr %xmm0, %xmm1
- %xmm0 = VADDSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VADDSDrr_Int %xmm0, %xmm1
- %xmm0 = VADDSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VADDSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VADDSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VADDSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VADDSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VADDSSrr %xmm0, %xmm1
- %xmm0 = VADDSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VADDSSrr_Int %xmm0, %xmm1
- %xmm0 = VADDSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VDIVSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VDIVSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VDIVSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VDIVSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VDIVSDrr %xmm0, %xmm1
- %xmm0 = VDIVSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VDIVSDrr_Int %xmm0, %xmm1
- %xmm0 = VDIVSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VDIVSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VDIVSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VDIVSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VDIVSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VDIVSSrr %xmm0, %xmm1
- %xmm0 = VDIVSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VDIVSSrr_Int %xmm0, %xmm1
- %xmm0 = VDIVSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXCSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXCSDrr %xmm0, %xmm1
- %xmm0 = VMAXCSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXCSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXCSSrr %xmm0, %xmm1
- %xmm0 = VMAXCSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXCSDrr %xmm0, %xmm1
- %xmm0 = VMAXSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXSDrr_Int %xmm0, %xmm1
- %xmm0 = VMAXSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXCSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMAXSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMAXCSSrr %xmm0, %xmm1
- %xmm0 = VMAXSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXSSrr_Int %xmm0, %xmm1
- %xmm0 = VMAXSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINCSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINCSDrr %xmm0, %xmm1
- %xmm0 = VMINCSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINCSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINCSSrr %xmm0, %xmm1
- %xmm0 = VMINCSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINCSDrr %xmm0, %xmm1
- %xmm0 = VMINSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINSDrr_Int %xmm0, %xmm1
- %xmm0 = VMINSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINCSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMINSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMINCSSrr %xmm0, %xmm1
- %xmm0 = VMINSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINSSrr_Int %xmm0, %xmm1
- %xmm0 = VMINSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMULSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMULSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMULSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMULSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMULSDrr %xmm0, %xmm1
- %xmm0 = VMULSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMULSDrr_Int %xmm0, %xmm1
- %xmm0 = VMULSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMULSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMULSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMULSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VMULSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VMULSSrr %xmm0, %xmm1
- %xmm0 = VMULSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMULSSrr_Int %xmm0, %xmm1
- %xmm0 = VMULSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VSUBSDrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VSUBSDZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VSUBSDrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VSUBSDZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VSUBSDrr %xmm0, %xmm1
- %xmm0 = VSUBSDZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VSUBSDrr_Int %xmm0, %xmm1
- %xmm0 = VSUBSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VSUBSSrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VSUBSSZrm %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VSUBSSrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- %xmm0 = VSUBSSZrm_Int %xmm0, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm0 = VSUBSSrr %xmm0, %xmm1
- %xmm0 = VSUBSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VSUBSSrr_Int %xmm0, %xmm1
- %xmm0 = VSUBSSZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VFMADD132SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD132SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD132SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD132SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD132SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD132SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD132SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD132SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD132SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD132SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD132SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD132SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD132SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD132SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD132SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD132SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD213SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD213SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD213SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD213SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD213SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD213SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD213SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD213SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD213SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD213SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD213SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD213SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD213SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD213SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD213SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD213SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD231SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD231SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD231SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD231SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD231SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD231SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD231SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD231SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD231SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD231SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD231SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMADD231SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMADD231SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD231SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMADD231SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMADD231SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB132SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB132SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB132SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB132SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB132SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB132SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB132SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB132SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB132SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB132SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB132SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB132SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB132SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB132SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB132SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB132SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB213SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB213SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB213SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB213SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB213SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB213SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB213SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB213SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB213SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB213SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB213SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB213SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB213SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB213SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB213SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB213SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB231SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB231SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB231SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB231SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB231SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB231SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB231SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB231SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB231SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB231SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB231SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFMSUB231SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFMSUB231SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB231SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFMSUB231SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFMSUB231SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD132SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD132SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD132SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD132SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD132SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD132SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD132SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD132SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD132SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD132SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD132SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD132SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD132SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD132SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD132SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD132SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD213SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD213SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD213SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD213SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD213SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD213SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD213SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD213SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD213SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD213SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD213SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD213SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD213SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD213SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD213SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD213SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD231SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD231SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD231SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD231SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD231SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD231SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD231SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD231SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD231SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD231SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD231SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMADD231SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMADD231SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD231SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMADD231SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMADD231SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB132SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB132SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB132SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB132SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB132SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB132SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB132SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB132SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB132SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB132SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB132SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB132SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB132SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB132SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB132SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB132SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB213SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB213SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB213SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB213SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB213SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB213SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB213SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB213SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB213SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB213SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB213SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB213SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB213SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB213SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB213SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB213SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB231SDm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB231SDZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB231SDm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB231SDZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB231SDr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB231SDZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB231SDr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB231SDZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB231SSm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB231SSZm %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB231SSm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- %xmm0 = VFNMSUB231SSZm_Int %xmm0, %xmm0, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VFNMSUB231SSr %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB231SSZr %xmm0, %xmm1, %xmm2
- ; CHECK: %xmm0 = VFNMSUB231SSr_Int %xmm0, %xmm1, %xmm2
- %xmm0 = VFNMSUB231SSZr_Int %xmm0, %xmm1, %xmm2
- ; CHECK: VPEXTRBmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
- VPEXTRBZmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
- ; CHECK: %eax = VPEXTRBrr %xmm0, 1
- %eax = VPEXTRBZrr %xmm0, 1
- ; CHECK: VPEXTRDmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
- VPEXTRDZmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
- ; CHECK: %eax = VPEXTRDrr %xmm0, 1
- %eax = VPEXTRDZrr %xmm0, 1
- ; CHECK: VPEXTRQmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
- VPEXTRQZmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
- ; CHECK: %rax = VPEXTRQrr %xmm0, 1
- %rax = VPEXTRQZrr %xmm0, 1
- ; CHECK: VPEXTRWmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
- VPEXTRWZmr %rdi, 1, %noreg, 0, %noreg, %xmm0, 3
- ; CHECK: %eax = VPEXTRWrr %xmm0, 1
- %eax = VPEXTRWZrr %xmm0, 1
- ; CHECK: %eax = VPEXTRWrr_REV %xmm0, 1
- %eax = VPEXTRWZrr_REV %xmm0, 1
- ; CHECK: %xmm0 = VPINSRBrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
- %xmm0 = VPINSRBZrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
- ; CHECK: %xmm0 = VPINSRBrr %xmm0, %edi, 5
- %xmm0 = VPINSRBZrr %xmm0, %edi, 5
- ; CHECK: %xmm0 = VPINSRDrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
- %xmm0 = VPINSRDZrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
- ; CHECK: %xmm0 = VPINSRDrr %xmm0, %edi, 5
- %xmm0 = VPINSRDZrr %xmm0, %edi, 5
- ; CHECK: %xmm0 = VPINSRQrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
- %xmm0 = VPINSRQZrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
- ; CHECK: %xmm0 = VPINSRQrr %xmm0, %rdi, 5
- %xmm0 = VPINSRQZrr %xmm0, %rdi, 5
- ; CHECK: %xmm0 = VPINSRWrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
- %xmm0 = VPINSRWZrm %xmm0, %rsi, 1, %noreg, 0, %noreg, 3
- ; CHECK: %xmm0 = VPINSRWrr %xmm0, %edi, 5
- %xmm0 = VPINSRWZrr %xmm0, %edi, 5
- ; CHECK: %xmm0 = VSQRTSDm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VSQRTSDZm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VSQRTSDm_Int %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VSQRTSDZm_Int %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VSQRTSDr %xmm0, %noreg
- %xmm0 = VSQRTSDZr %xmm0, %noreg
- ; CHECK: %xmm0 = VSQRTSDr_Int %xmm0, %noreg
- %xmm0 = VSQRTSDZr_Int %xmm0, %noreg
- ; CHECK: %xmm0 = VSQRTSSm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VSQRTSSZm %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VSQRTSSm_Int %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VSQRTSSZm_Int %xmm0, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VSQRTSSr %xmm0, %noreg
- %xmm0 = VSQRTSSZr %xmm0, %noreg
- ; CHECK: %xmm0 = VSQRTSSr_Int %xmm0, %noreg
- %xmm0 = VSQRTSSZr_Int %xmm0, %noreg
- ; CHECK: %rdi = VCVTSD2SI64rr_Int %xmm0
- %rdi = VCVTSD2SI64Zrr_Int %xmm0
- ; CHECK: %edi = VCVTSD2SIrr_Int %xmm0
- %edi = VCVTSD2SIZrr_Int %xmm0
- ; CHECK: %xmm0 = VCVTSD2SSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSD2SSZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSD2SSrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSD2SSZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSD2SSrr %xmm0, %noreg
- %xmm0 = VCVTSD2SSZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSD2SSrr_Int %xmm0, %noreg
- %xmm0 = VCVTSD2SSZrr_Int %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSI2SDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSI2SDZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSI2SDrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSI2SDZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSI2SDrr %xmm0, %noreg
- %xmm0 = VCVTSI2SDZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSI2SDrr_Int %xmm0, %noreg
- %xmm0 = VCVTSI2SDZrr_Int %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSI2SSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSI2SSZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSI2SSrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSI2SSZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSI2SSrr %xmm0, %noreg
- %xmm0 = VCVTSI2SSZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSI2SSrr_Int %xmm0, %noreg
- %xmm0 = VCVTSI2SSZrr_Int %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSI642SDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSI642SDZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSI642SDrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSI642SDZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSI642SDrr %xmm0, %noreg
- %xmm0 = VCVTSI642SDZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSI642SDrr_Int %xmm0, %noreg
- %xmm0 = VCVTSI642SDZrr_Int %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSI642SSrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSI642SSZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSI642SSrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSI642SSZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSI642SSrr %xmm0, %noreg
- %xmm0 = VCVTSI642SSZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSI642SSrr_Int %xmm0, %noreg
- %xmm0 = VCVTSI642SSZrr_Int %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSS2SDrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSS2SDZrm %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSS2SDrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- %xmm0 = VCVTSS2SDZrm_Int %xmm0, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm0 = VCVTSS2SDrr %xmm0, %noreg
- %xmm0 = VCVTSS2SDZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VCVTSS2SDrr_Int %xmm0, %noreg
- %xmm0 = VCVTSS2SDZrr_Int %xmm0, %noreg
- ; CHECK: %rdi = VCVTSS2SI64rm_Int %rdi, %xmm0, 1, %noreg, 0
- %rdi = VCVTSS2SI64Zrm_Int %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %rdi = VCVTSS2SI64rr_Int %xmm0
- %rdi = VCVTSS2SI64Zrr_Int %xmm0
- ; CHECK: %edi = VCVTSS2SIrm_Int %rdi, %xmm0, 1, %noreg, 0
- %edi = VCVTSS2SIZrm_Int %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %edi = VCVTSS2SIrr_Int %xmm0
- %edi = VCVTSS2SIZrr_Int %xmm0
- ; CHECK: %rdi = VCVTTSD2SI64rm %rdi, %xmm0, 1, %noreg, 0
- %rdi = VCVTTSD2SI64Zrm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %rdi = VCVTTSD2SI64rm_Int %rdi, %xmm0, 1, %noreg, 0
- %rdi = VCVTTSD2SI64Zrm_Int %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %rdi = VCVTTSD2SI64rr %xmm0
- %rdi = VCVTTSD2SI64Zrr %xmm0
- ; CHECK: %rdi = VCVTTSD2SI64rr_Int %xmm0
- %rdi = VCVTTSD2SI64Zrr_Int %xmm0
- ; CHECK: %edi = VCVTTSD2SIrm %rdi, %xmm0, 1, %noreg, 0
- %edi = VCVTTSD2SIZrm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %edi = VCVTTSD2SIrm_Int %rdi, %xmm0, 1, %noreg, 0
- %edi = VCVTTSD2SIZrm_Int %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %edi = VCVTTSD2SIrr %xmm0
- %edi = VCVTTSD2SIZrr %xmm0
- ; CHECK: %edi = VCVTTSD2SIrr_Int %xmm0
- %edi = VCVTTSD2SIZrr_Int %xmm0
- ; CHECK: %rdi = VCVTTSS2SI64rm %rdi, %xmm0, 1, %noreg, 0
- %rdi = VCVTTSS2SI64Zrm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %rdi = VCVTTSS2SI64rm_Int %rdi, %xmm0, 1, %noreg, 0
- %rdi = VCVTTSS2SI64Zrm_Int %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %rdi = VCVTTSS2SI64rr %xmm0
- %rdi = VCVTTSS2SI64Zrr %xmm0
- ; CHECK: %rdi = VCVTTSS2SI64rr_Int %xmm0
- %rdi = VCVTTSS2SI64Zrr_Int %xmm0
- ; CHECK: %edi = VCVTTSS2SIrm %rdi, %xmm0, 1, %noreg, 0
- %edi = VCVTTSS2SIZrm %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %edi = VCVTTSS2SIrm_Int %rdi, %xmm0, 1, %noreg, 0
- %edi = VCVTTSS2SIZrm_Int %rdi, %xmm0, 1, %noreg, 0
- ; CHECK: %edi = VCVTTSS2SIrr %xmm0
- %edi = VCVTTSS2SIZrr %xmm0
- ; CHECK: %edi = VCVTTSS2SIrr_Int %xmm0
- %edi = VCVTTSS2SIZrr_Int %xmm0
- ; CHECK: %xmm0 = VMOV64toSDrr %rdi
- %xmm0 = VMOV64toSDZrr %rdi
- ; CHECK: %xmm0 = VMOVDI2SSrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VMOVDI2SSZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOVDI2SSrr %eax
- %xmm0 = VMOVDI2SSZrr %eax
- ; CHECK: VMOVSDmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- VMOVSDZmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOVSDrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VMOVSDZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOVSDrr %xmm0, %noreg
- %xmm0 = VMOVSDZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VMOVSDrr_REV %xmm0, %noreg
- %xmm0 = VMOVSDZrr_REV %xmm0, %noreg
- ; CHECK: %rax = VMOVSDto64rr %xmm0
- %rax = VMOVSDto64Zrr %xmm0
- ; CHECK: VMOVSDto64mr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- VMOVSDto64Zmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- ; CHECK: VMOVSSmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- VMOVSSZmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOVSSrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VMOVSSZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOVSSrr %xmm0, %noreg
- %xmm0 = VMOVSSZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VMOVSSrr_REV %xmm0, %noreg
- %xmm0 = VMOVSSZrr_REV %xmm0, %noreg
- ; CHECK: VMOVSS2DImr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- VMOVSS2DIZmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %eax = VMOVSS2DIrr %xmm0
- %eax = VMOVSS2DIZrr %xmm0
- ; CHECK: %xmm0 = VMOV64toPQIrr %rdi
- %xmm0 = VMOV64toPQIZrr %rdi
- ; CHECK: %xmm0 = VMOV64toPQIrm %rdi, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VMOV64toPQIZrm %rdi, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOV64toSDrr %rdi
- %xmm0 = VMOV64toSDZrr %rdi
- ; CHECK: %xmm0 = VMOVDI2PDIrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VMOVDI2PDIZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOVDI2PDIrr %edi
- %xmm0 = VMOVDI2PDIZrr %edi
- ; CHECK: %xmm0 = VMOVLHPSrr %xmm0, %noreg
- %xmm0 = VMOVLHPSZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VMOVHLPSrr %xmm0, %noreg
- %xmm0 = VMOVHLPSZrr %xmm0, %noreg
- ; CHECK: VMOVPDI2DImr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- VMOVPDI2DIZmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %edi = VMOVPDI2DIrr %xmm0
- %edi = VMOVPDI2DIZrr %xmm0
- ; CHECK: %xmm0 = VMOVPQI2QIrr %xmm0
- %xmm0 = VMOVPQI2QIZrr %xmm0
- ; CHECK: VMOVPQI2QImr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- VMOVPQI2QIZmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %rdi = VMOVPQIto64rr %xmm0
- %rdi = VMOVPQIto64Zrr %xmm0
- ; CHECK: VMOVPQIto64mr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- VMOVPQIto64Zmr %rdi, %xmm0, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOVQI2PQIrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VMOVQI2PQIZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VMOVZPQILo2PQIrr %xmm0
- %xmm0 = VMOVZPQILo2PQIZrr %xmm0
- ; CHECK: VCOMISDrm_Int %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VCOMISDZrm_Int %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VCOMISDrr_Int %xmm0, %xmm1, implicit-def %eflags
- VCOMISDZrr_Int %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VCOMISSrm_Int %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VCOMISSZrm_Int %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VCOMISSrr_Int %xmm0, %xmm1, implicit-def %eflags
- VCOMISSZrr_Int %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISDrm_Int %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VUCOMISDZrm_Int %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VUCOMISDrr_Int %xmm0, %xmm1, implicit-def %eflags
- VUCOMISDZrr_Int %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISSrm_Int %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VUCOMISSZrm_Int %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VUCOMISSrr_Int %xmm0, %xmm1, implicit-def %eflags
- VUCOMISSZrr_Int %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VCOMISDrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VCOMISDZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VCOMISDrr %xmm0, %xmm1, implicit-def %eflags
- VCOMISDZrr %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VCOMISSrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VCOMISSZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VCOMISSrr %xmm0, %xmm1, implicit-def %eflags
- VCOMISSZrr %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISDrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VUCOMISDZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VUCOMISDrr %xmm0, %xmm1, implicit-def %eflags
- VUCOMISDZrr %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISSrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VUCOMISSZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VUCOMISSrr %xmm0, %xmm1, implicit-def %eflags
- VUCOMISSZrr %xmm0, %xmm1, implicit-def %eflags
- ; CHECK: VEXTRACTPSmr %rdi, 1, %noreg, 0, %noreg, %xmm0, %noreg
- VEXTRACTPSZmr %rdi, 1, %noreg, 0, %noreg, %xmm0, %noreg
- ; CHECK: %eax = VEXTRACTPSrr %xmm0, %noreg
- %eax = VEXTRACTPSZrr %xmm0, %noreg
- ; CHECK: %xmm0 = VINSERTPSrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm0 = VINSERTPSZrm %xmm0, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm0 = VINSERTPSrr %xmm0, %xmm0, %noreg
- %xmm0 = VINSERTPSZrr %xmm0, %xmm0, %noreg
+ ; CHECK: $xmm0 = VADDSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VADDSDZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VADDSDrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VADDSDZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VADDSDrr $xmm0, $xmm1
+ $xmm0 = VADDSDZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VADDSDrr_Int $xmm0, $xmm1
+ $xmm0 = VADDSDZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VADDSSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VADDSSZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VADDSSrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VADDSSZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VADDSSrr $xmm0, $xmm1
+ $xmm0 = VADDSSZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VADDSSrr_Int $xmm0, $xmm1
+ $xmm0 = VADDSSZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VDIVSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VDIVSDZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VDIVSDrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VDIVSDZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VDIVSDrr $xmm0, $xmm1
+ $xmm0 = VDIVSDZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VDIVSDrr_Int $xmm0, $xmm1
+ $xmm0 = VDIVSDZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VDIVSSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VDIVSSZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VDIVSSrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VDIVSSZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VDIVSSrr $xmm0, $xmm1
+ $xmm0 = VDIVSSZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VDIVSSrr_Int $xmm0, $xmm1
+ $xmm0 = VDIVSSZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMAXCSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMAXCSDZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMAXCSDrr $xmm0, $xmm1
+ $xmm0 = VMAXCSDZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMAXCSSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMAXCSSZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMAXCSSrr $xmm0, $xmm1
+ $xmm0 = VMAXCSSZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMAXCSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMAXSDZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMAXSDrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMAXSDZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMAXCSDrr $xmm0, $xmm1
+ $xmm0 = VMAXSDZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMAXSDrr_Int $xmm0, $xmm1
+ $xmm0 = VMAXSDZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMAXCSSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMAXSSZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMAXSSrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMAXSSZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMAXCSSrr $xmm0, $xmm1
+ $xmm0 = VMAXSSZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMAXSSrr_Int $xmm0, $xmm1
+ $xmm0 = VMAXSSZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMINCSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMINCSDZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMINCSDrr $xmm0, $xmm1
+ $xmm0 = VMINCSDZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMINCSSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMINCSSZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMINCSSrr $xmm0, $xmm1
+ $xmm0 = VMINCSSZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMINCSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMINSDZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMINSDrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMINSDZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMINCSDrr $xmm0, $xmm1
+ $xmm0 = VMINSDZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMINSDrr_Int $xmm0, $xmm1
+ $xmm0 = VMINSDZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMINCSSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMINSSZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMINSSrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMINSSZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMINCSSrr $xmm0, $xmm1
+ $xmm0 = VMINSSZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMINSSrr_Int $xmm0, $xmm1
+ $xmm0 = VMINSSZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMULSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMULSDZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMULSDrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMULSDZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMULSDrr $xmm0, $xmm1
+ $xmm0 = VMULSDZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMULSDrr_Int $xmm0, $xmm1
+ $xmm0 = VMULSDZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMULSSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMULSSZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMULSSrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VMULSSZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VMULSSrr $xmm0, $xmm1
+ $xmm0 = VMULSSZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VMULSSrr_Int $xmm0, $xmm1
+ $xmm0 = VMULSSZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VSUBSDrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VSUBSDZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VSUBSDrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VSUBSDZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VSUBSDrr $xmm0, $xmm1
+ $xmm0 = VSUBSDZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VSUBSDrr_Int $xmm0, $xmm1
+ $xmm0 = VSUBSDZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VSUBSSrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VSUBSSZrm $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VSUBSSrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ $xmm0 = VSUBSSZrm_Int $xmm0, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm0 = VSUBSSrr $xmm0, $xmm1
+ $xmm0 = VSUBSSZrr $xmm0, $xmm1
+ ; CHECK: $xmm0 = VSUBSSrr_Int $xmm0, $xmm1
+ $xmm0 = VSUBSSZrr_Int $xmm0, $xmm1
+ ; CHECK: $xmm0 = VFMADD132SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD132SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD132SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD132SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD132SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD132SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD132SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD132SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD132SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD132SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD132SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD132SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD132SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD132SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD132SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD132SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD213SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD213SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD213SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD213SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD213SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD213SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD213SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD213SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD213SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD213SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD213SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD213SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD213SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD213SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD213SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD213SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD231SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD231SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD231SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD231SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD231SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD231SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD231SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD231SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD231SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD231SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD231SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMADD231SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMADD231SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD231SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMADD231SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMADD231SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB132SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB132SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB132SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB132SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB132SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB132SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB132SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB132SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB132SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB132SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB132SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB132SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB132SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB132SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB132SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB132SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB213SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB213SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB213SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB213SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB213SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB213SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB213SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB213SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB213SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB213SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB213SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB213SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB213SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB213SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB213SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB213SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB231SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB231SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB231SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB231SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB231SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB231SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB231SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB231SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB231SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB231SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB231SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFMSUB231SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFMSUB231SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB231SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFMSUB231SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFMSUB231SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD132SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD132SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD132SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD132SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD132SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD132SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD132SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD132SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD132SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD132SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD132SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD132SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD132SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD132SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD132SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD132SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD213SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD213SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD213SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD213SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD213SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD213SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD213SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD213SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD213SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD213SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD213SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD213SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD213SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD213SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD213SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD213SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD231SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD231SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD231SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD231SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD231SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD231SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD231SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD231SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD231SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD231SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD231SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMADD231SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMADD231SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD231SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMADD231SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMADD231SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB132SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB132SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB132SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB132SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB132SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB132SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB132SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB132SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB132SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB132SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB132SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB132SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB132SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB132SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB132SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB132SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB213SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB213SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB213SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB213SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB213SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB213SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB213SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB213SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB213SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB213SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB213SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB213SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB213SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB213SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB213SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB213SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB231SDm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB231SDZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB231SDm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB231SDZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB231SDr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB231SDZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB231SDr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB231SDZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB231SSm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB231SSZm $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB231SSm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ $xmm0 = VFNMSUB231SSZm_Int $xmm0, $xmm0, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VFNMSUB231SSr $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB231SSZr $xmm0, $xmm1, $xmm2
+ ; CHECK: $xmm0 = VFNMSUB231SSr_Int $xmm0, $xmm1, $xmm2
+ $xmm0 = VFNMSUB231SSZr_Int $xmm0, $xmm1, $xmm2
+ ; CHECK: VPEXTRBmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+ VPEXTRBZmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+ ; CHECK: $eax = VPEXTRBrr $xmm0, 1
+ $eax = VPEXTRBZrr $xmm0, 1
+ ; CHECK: VPEXTRDmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+ VPEXTRDZmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+ ; CHECK: $eax = VPEXTRDrr $xmm0, 1
+ $eax = VPEXTRDZrr $xmm0, 1
+ ; CHECK: VPEXTRQmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+ VPEXTRQZmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+ ; CHECK: $rax = VPEXTRQrr $xmm0, 1
+ $rax = VPEXTRQZrr $xmm0, 1
+ ; CHECK: VPEXTRWmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+ VPEXTRWZmr $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+ ; CHECK: $eax = VPEXTRWrr $xmm0, 1
+ $eax = VPEXTRWZrr $xmm0, 1
+ ; CHECK: $eax = VPEXTRWrr_REV $xmm0, 1
+ $eax = VPEXTRWZrr_REV $xmm0, 1
+ ; CHECK: $xmm0 = VPINSRBrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+ $xmm0 = VPINSRBZrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+ ; CHECK: $xmm0 = VPINSRBrr $xmm0, $edi, 5
+ $xmm0 = VPINSRBZrr $xmm0, $edi, 5
+ ; CHECK: $xmm0 = VPINSRDrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+ $xmm0 = VPINSRDZrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+ ; CHECK: $xmm0 = VPINSRDrr $xmm0, $edi, 5
+ $xmm0 = VPINSRDZrr $xmm0, $edi, 5
+ ; CHECK: $xmm0 = VPINSRQrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+ $xmm0 = VPINSRQZrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+ ; CHECK: $xmm0 = VPINSRQrr $xmm0, $rdi, 5
+ $xmm0 = VPINSRQZrr $xmm0, $rdi, 5
+ ; CHECK: $xmm0 = VPINSRWrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+ $xmm0 = VPINSRWZrm $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+ ; CHECK: $xmm0 = VPINSRWrr $xmm0, $edi, 5
+ $xmm0 = VPINSRWZrr $xmm0, $edi, 5
+ ; CHECK: $xmm0 = VSQRTSDm $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VSQRTSDZm $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VSQRTSDm_Int $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VSQRTSDZm_Int $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VSQRTSDr $xmm0, $noreg
+ $xmm0 = VSQRTSDZr $xmm0, $noreg
+ ; CHECK: $xmm0 = VSQRTSDr_Int $xmm0, $noreg
+ $xmm0 = VSQRTSDZr_Int $xmm0, $noreg
+ ; CHECK: $xmm0 = VSQRTSSm $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VSQRTSSZm $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VSQRTSSm_Int $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VSQRTSSZm_Int $xmm0, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VSQRTSSr $xmm0, $noreg
+ $xmm0 = VSQRTSSZr $xmm0, $noreg
+ ; CHECK: $xmm0 = VSQRTSSr_Int $xmm0, $noreg
+ $xmm0 = VSQRTSSZr_Int $xmm0, $noreg
+ ; CHECK: $rdi = VCVTSD2SI64rr_Int $xmm0
+ $rdi = VCVTSD2SI64Zrr_Int $xmm0
+ ; CHECK: $edi = VCVTSD2SIrr_Int $xmm0
+ $edi = VCVTSD2SIZrr_Int $xmm0
+ ; CHECK: $xmm0 = VCVTSD2SSrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSD2SSZrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSD2SSrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSD2SSZrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSD2SSrr $xmm0, $noreg
+ $xmm0 = VCVTSD2SSZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSD2SSrr_Int $xmm0, $noreg
+ $xmm0 = VCVTSD2SSZrr_Int $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSI2SDrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSI2SDZrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSI2SDrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSI2SDZrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSI2SDrr $xmm0, $noreg
+ $xmm0 = VCVTSI2SDZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSI2SDrr_Int $xmm0, $noreg
+ $xmm0 = VCVTSI2SDZrr_Int $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSI2SSrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSI2SSZrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSI2SSrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSI2SSZrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSI2SSrr $xmm0, $noreg
+ $xmm0 = VCVTSI2SSZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSI2SSrr_Int $xmm0, $noreg
+ $xmm0 = VCVTSI2SSZrr_Int $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSI642SDrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSI642SDZrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSI642SDrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSI642SDZrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSI642SDrr $xmm0, $noreg
+ $xmm0 = VCVTSI642SDZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSI642SDrr_Int $xmm0, $noreg
+ $xmm0 = VCVTSI642SDZrr_Int $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSI642SSrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSI642SSZrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSI642SSrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSI642SSZrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSI642SSrr $xmm0, $noreg
+ $xmm0 = VCVTSI642SSZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSI642SSrr_Int $xmm0, $noreg
+ $xmm0 = VCVTSI642SSZrr_Int $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSS2SDrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSS2SDZrm $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSS2SDrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ $xmm0 = VCVTSS2SDZrm_Int $xmm0, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm0 = VCVTSS2SDrr $xmm0, $noreg
+ $xmm0 = VCVTSS2SDZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VCVTSS2SDrr_Int $xmm0, $noreg
+ $xmm0 = VCVTSS2SDZrr_Int $xmm0, $noreg
+ ; CHECK: $rdi = VCVTSS2SI64rm_Int $rdi, $xmm0, 1, $noreg, 0
+ $rdi = VCVTSS2SI64Zrm_Int $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $rdi = VCVTSS2SI64rr_Int $xmm0
+ $rdi = VCVTSS2SI64Zrr_Int $xmm0
+ ; CHECK: $edi = VCVTSS2SIrm_Int $rdi, $xmm0, 1, $noreg, 0
+ $edi = VCVTSS2SIZrm_Int $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $edi = VCVTSS2SIrr_Int $xmm0
+ $edi = VCVTSS2SIZrr_Int $xmm0
+ ; CHECK: $rdi = VCVTTSD2SI64rm $rdi, $xmm0, 1, $noreg, 0
+ $rdi = VCVTTSD2SI64Zrm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $rdi = VCVTTSD2SI64rm_Int $rdi, $xmm0, 1, $noreg, 0
+ $rdi = VCVTTSD2SI64Zrm_Int $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $rdi = VCVTTSD2SI64rr $xmm0
+ $rdi = VCVTTSD2SI64Zrr $xmm0
+ ; CHECK: $rdi = VCVTTSD2SI64rr_Int $xmm0
+ $rdi = VCVTTSD2SI64Zrr_Int $xmm0
+ ; CHECK: $edi = VCVTTSD2SIrm $rdi, $xmm0, 1, $noreg, 0
+ $edi = VCVTTSD2SIZrm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $edi = VCVTTSD2SIrm_Int $rdi, $xmm0, 1, $noreg, 0
+ $edi = VCVTTSD2SIZrm_Int $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $edi = VCVTTSD2SIrr $xmm0
+ $edi = VCVTTSD2SIZrr $xmm0
+ ; CHECK: $edi = VCVTTSD2SIrr_Int $xmm0
+ $edi = VCVTTSD2SIZrr_Int $xmm0
+ ; CHECK: $rdi = VCVTTSS2SI64rm $rdi, $xmm0, 1, $noreg, 0
+ $rdi = VCVTTSS2SI64Zrm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $rdi = VCVTTSS2SI64rm_Int $rdi, $xmm0, 1, $noreg, 0
+ $rdi = VCVTTSS2SI64Zrm_Int $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $rdi = VCVTTSS2SI64rr $xmm0
+ $rdi = VCVTTSS2SI64Zrr $xmm0
+ ; CHECK: $rdi = VCVTTSS2SI64rr_Int $xmm0
+ $rdi = VCVTTSS2SI64Zrr_Int $xmm0
+ ; CHECK: $edi = VCVTTSS2SIrm $rdi, $xmm0, 1, $noreg, 0
+ $edi = VCVTTSS2SIZrm $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $edi = VCVTTSS2SIrm_Int $rdi, $xmm0, 1, $noreg, 0
+ $edi = VCVTTSS2SIZrm_Int $rdi, $xmm0, 1, $noreg, 0
+ ; CHECK: $edi = VCVTTSS2SIrr $xmm0
+ $edi = VCVTTSS2SIZrr $xmm0
+ ; CHECK: $edi = VCVTTSS2SIrr_Int $xmm0
+ $edi = VCVTTSS2SIZrr_Int $xmm0
+ ; CHECK: $xmm0 = VMOV64toSDrr $rdi
+ $xmm0 = VMOV64toSDZrr $rdi
+ ; CHECK: $xmm0 = VMOVDI2SSrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VMOVDI2SSZrm $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOVDI2SSrr $eax
+ $xmm0 = VMOVDI2SSZrr $eax
+ ; CHECK: VMOVSDmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ VMOVSDZmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOVSDrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VMOVSDZrm $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOVSDrr $xmm0, $noreg
+ $xmm0 = VMOVSDZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VMOVSDrr_REV $xmm0, $noreg
+ $xmm0 = VMOVSDZrr_REV $xmm0, $noreg
+ ; CHECK: $rax = VMOVSDto64rr $xmm0
+ $rax = VMOVSDto64Zrr $xmm0
+ ; CHECK: VMOVSDto64mr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ VMOVSDto64Zmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: VMOVSSmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ VMOVSSZmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOVSSrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VMOVSSZrm $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOVSSrr $xmm0, $noreg
+ $xmm0 = VMOVSSZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VMOVSSrr_REV $xmm0, $noreg
+ $xmm0 = VMOVSSZrr_REV $xmm0, $noreg
+ ; CHECK: VMOVSS2DImr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ VMOVSS2DIZmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $eax = VMOVSS2DIrr $xmm0
+ $eax = VMOVSS2DIZrr $xmm0
+ ; CHECK: $xmm0 = VMOV64toPQIrr $rdi
+ $xmm0 = VMOV64toPQIZrr $rdi
+ ; CHECK: $xmm0 = VMOV64toPQIrm $rdi, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VMOV64toPQIZrm $rdi, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOV64toSDrr $rdi
+ $xmm0 = VMOV64toSDZrr $rdi
+ ; CHECK: $xmm0 = VMOVDI2PDIrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VMOVDI2PDIZrm $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOVDI2PDIrr $edi
+ $xmm0 = VMOVDI2PDIZrr $edi
+ ; CHECK: $xmm0 = VMOVLHPSrr $xmm0, $noreg
+ $xmm0 = VMOVLHPSZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VMOVHLPSrr $xmm0, $noreg
+ $xmm0 = VMOVHLPSZrr $xmm0, $noreg
+ ; CHECK: VMOVPDI2DImr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ VMOVPDI2DIZmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $edi = VMOVPDI2DIrr $xmm0
+ $edi = VMOVPDI2DIZrr $xmm0
+ ; CHECK: $xmm0 = VMOVPQI2QIrr $xmm0
+ $xmm0 = VMOVPQI2QIZrr $xmm0
+ ; CHECK: VMOVPQI2QImr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ VMOVPQI2QIZmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $rdi = VMOVPQIto64rr $xmm0
+ $rdi = VMOVPQIto64Zrr $xmm0
+ ; CHECK: VMOVPQIto64mr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ VMOVPQIto64Zmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOVQI2PQIrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VMOVQI2PQIZrm $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VMOVZPQILo2PQIrr $xmm0
+ $xmm0 = VMOVZPQILo2PQIZrr $xmm0
+ ; CHECK: VCOMISDrm_Int $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VCOMISDZrm_Int $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VCOMISDrr_Int $xmm0, $xmm1, implicit-def $eflags
+ VCOMISDZrr_Int $xmm0, $xmm1, implicit-def $eflags
+ ; CHECK: VCOMISSrm_Int $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VCOMISSZrm_Int $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VCOMISSrr_Int $xmm0, $xmm1, implicit-def $eflags
+ VCOMISSZrr_Int $xmm0, $xmm1, implicit-def $eflags
+ ; CHECK: VUCOMISDrm_Int $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VUCOMISDZrm_Int $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VUCOMISDrr_Int $xmm0, $xmm1, implicit-def $eflags
+ VUCOMISDZrr_Int $xmm0, $xmm1, implicit-def $eflags
+ ; CHECK: VUCOMISSrm_Int $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VUCOMISSZrm_Int $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VUCOMISSrr_Int $xmm0, $xmm1, implicit-def $eflags
+ VUCOMISSZrr_Int $xmm0, $xmm1, implicit-def $eflags
+ ; CHECK: VCOMISDrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VCOMISDZrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VCOMISDrr $xmm0, $xmm1, implicit-def $eflags
+ VCOMISDZrr $xmm0, $xmm1, implicit-def $eflags
+ ; CHECK: VCOMISSrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VCOMISSZrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VCOMISSrr $xmm0, $xmm1, implicit-def $eflags
+ VCOMISSZrr $xmm0, $xmm1, implicit-def $eflags
+ ; CHECK: VUCOMISDrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VUCOMISDZrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VUCOMISDrr $xmm0, $xmm1, implicit-def $eflags
+ VUCOMISDZrr $xmm0, $xmm1, implicit-def $eflags
+ ; CHECK: VUCOMISSrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VUCOMISSZrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VUCOMISSrr $xmm0, $xmm1, implicit-def $eflags
+ VUCOMISSZrr $xmm0, $xmm1, implicit-def $eflags
+ ; CHECK: VEXTRACTPSmr $rdi, 1, $noreg, 0, $noreg, $xmm0, $noreg
+ VEXTRACTPSZmr $rdi, 1, $noreg, 0, $noreg, $xmm0, $noreg
+ ; CHECK: $eax = VEXTRACTPSrr $xmm0, $noreg
+ $eax = VEXTRACTPSZrr $xmm0, $noreg
+ ; CHECK: $xmm0 = VINSERTPSrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm0 = VINSERTPSZrm $xmm0, $rdi, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm0 = VINSERTPSrr $xmm0, $xmm0, $noreg
+ $xmm0 = VINSERTPSZrr $xmm0, $xmm0, $noreg
- RET 0, %zmm0, %zmm1
+ RET 0, $zmm0, $zmm1
...
---
# CHECK-LABEL: name: evex_z256_to_evex_test
@@ -2326,880 +2326,880 @@
name: evex_z256_to_evex_test
body: |
bb.0:
- ; CHECK: VMOVAPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVAPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVAPDZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVAPDZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVAPDZ256rr %ymm16
- %ymm16 = VMOVAPDZ256rr %ymm16
- ; CHECK: %ymm16 = VMOVAPDZ256rr_REV %ymm16
- %ymm16 = VMOVAPDZ256rr_REV %ymm16
- ; CHECK: VMOVAPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVAPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVAPSZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVAPSZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVAPSZ256rr %ymm16
- %ymm16 = VMOVAPSZ256rr %ymm16
- ; CHECK: %ymm16 = VMOVAPSZ256rr_REV %ymm16
- %ymm16 = VMOVAPSZ256rr_REV %ymm16
- ; CHECK: %ymm16 = VMOVDDUPZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVDDUPZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVDDUPZ256rr %ymm16
- %ymm16 = VMOVDDUPZ256rr %ymm16
- ; CHECK: VMOVDQA32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVDQA32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVDQA32Z256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVDQA32Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVDQA32Z256rr %ymm16
- %ymm16 = VMOVDQA32Z256rr %ymm16
- ; CHECK: %ymm16 = VMOVDQA32Z256rr_REV %ymm16
- %ymm16 = VMOVDQA32Z256rr_REV %ymm16
- ; CHECK: VMOVDQA64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVDQA64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVDQA64Z256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVDQA64Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVDQA64Z256rr %ymm16
- %ymm16 = VMOVDQA64Z256rr %ymm16
- ; CHECK: %ymm16 = VMOVDQA64Z256rr_REV %ymm16
- %ymm16 = VMOVDQA64Z256rr_REV %ymm16
- ; CHECK: VMOVDQU16Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVDQU16Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVDQU16Z256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVDQU16Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVDQU16Z256rr %ymm16
- %ymm16 = VMOVDQU16Z256rr %ymm16
- ; CHECK: %ymm16 = VMOVDQU16Z256rr_REV %ymm16
- %ymm16 = VMOVDQU16Z256rr_REV %ymm16
- ; CHECK: VMOVDQU32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVDQU32Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVDQU32Z256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVDQU32Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVDQU32Z256rr %ymm16
- %ymm16 = VMOVDQU32Z256rr %ymm16
- ; CHECK: %ymm16 = VMOVDQU32Z256rr_REV %ymm16
- %ymm16 = VMOVDQU32Z256rr_REV %ymm16
- ; CHECK: VMOVDQU64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVDQU64Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVDQU64Z256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVDQU64Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVDQU64Z256rr %ymm16
- %ymm16 = VMOVDQU64Z256rr %ymm16
- ; CHECK: %ymm16 = VMOVDQU64Z256rr_REV %ymm16
- %ymm16 = VMOVDQU64Z256rr_REV %ymm16
- ; CHECK: VMOVDQU8Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVDQU8Z256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVDQU8Z256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVDQU8Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVDQU8Z256rr %ymm16
- %ymm16 = VMOVDQU8Z256rr %ymm16
- ; CHECK: %ymm16 = VMOVDQU8Z256rr_REV %ymm16
- %ymm16 = VMOVDQU8Z256rr_REV %ymm16
- ; CHECK: %ymm16 = VMOVNTDQAZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVNTDQAZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: VMOVNTDQZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVNTDQZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: VMOVNTPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVNTPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: VMOVNTPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVNTPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVSHDUPZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVSHDUPZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVSHDUPZ256rr %ymm16
- %ymm16 = VMOVSHDUPZ256rr %ymm16
- ; CHECK: %ymm16 = VMOVSLDUPZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVSLDUPZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVSLDUPZ256rr %ymm16
- %ymm16 = VMOVSLDUPZ256rr %ymm16
- ; CHECK: VMOVUPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVUPDZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VMOVUPDZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMOVUPDZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMOVUPDZ256rr %ymm16
- %ymm16 = VMOVUPDZ256rr %ymm16
- ; CHECK: %ymm16 = VMOVUPDZ256rr_REV %ymm16
- %ymm16 = VMOVUPDZ256rr_REV %ymm16
- ; CHECK: VMOVUPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- VMOVUPSZ256mr %rdi, 1, %noreg, 0, %noreg, %ymm16
- ; CHECK: %ymm16 = VPANDDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPANDDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPANDDZ256rr %ymm16, %ymm1
- %ymm16 = VPANDDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPANDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPANDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPANDQZ256rr %ymm16, %ymm1
- %ymm16 = VPANDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPANDNDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPANDNDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPANDNDZ256rr %ymm16, %ymm1
- %ymm16 = VPANDNDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPANDNQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPANDNQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPANDNQZ256rr %ymm16, %ymm1
- %ymm16 = VPANDNQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPAVGBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPAVGBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPAVGBZ256rr %ymm16, %ymm1
- %ymm16 = VPAVGBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPAVGWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPAVGWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPAVGWZ256rr %ymm16, %ymm1
- %ymm16 = VPAVGWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPADDBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPADDBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPADDBZ256rr %ymm16, %ymm1
- %ymm16 = VPADDBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPADDDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPADDDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPADDDZ256rr %ymm16, %ymm1
- %ymm16 = VPADDDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPADDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPADDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPADDQZ256rr %ymm16, %ymm1
- %ymm16 = VPADDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPADDSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPADDSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPADDSBZ256rr %ymm16, %ymm1
- %ymm16 = VPADDSBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPADDSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPADDSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPADDSWZ256rr %ymm16, %ymm1
- %ymm16 = VPADDSWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPADDUSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPADDUSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPADDUSBZ256rr %ymm16, %ymm1
- %ymm16 = VPADDUSBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPADDUSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPADDUSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPADDUSWZ256rr %ymm16, %ymm1
- %ymm16 = VPADDUSWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPADDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPADDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPADDWZ256rr %ymm16, %ymm1
- %ymm16 = VPADDWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMULPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMULPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMULPDZ256rr %ymm16, %ymm1
- %ymm16 = VMULPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMULPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMULPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMULPSZ256rr %ymm16, %ymm1
- %ymm16 = VMULPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VORPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VORPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VORPDZ256rr %ymm16, %ymm1
- %ymm16 = VORPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VORPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VORPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VORPSZ256rr %ymm16, %ymm1
- %ymm16 = VORPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMADDUBSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMADDUBSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMADDUBSWZ256rr %ymm16, %ymm1
- %ymm16 = VPMADDUBSWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMADDWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMADDWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMADDWDZ256rr %ymm16, %ymm1
- %ymm16 = VPMADDWDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMAXSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMAXSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMAXSBZ256rr %ymm16, %ymm1
- %ymm16 = VPMAXSBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMAXSDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMAXSDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMAXSDZ256rr %ymm16, %ymm1
- %ymm16 = VPMAXSDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMAXSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMAXSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMAXSWZ256rr %ymm16, %ymm1
- %ymm16 = VPMAXSWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMAXUBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMAXUBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMAXUBZ256rr %ymm16, %ymm1
- %ymm16 = VPMAXUBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMAXUDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMAXUDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMAXUDZ256rr %ymm16, %ymm1
- %ymm16 = VPMAXUDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMAXUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMAXUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMAXUWZ256rr %ymm16, %ymm1
- %ymm16 = VPMAXUWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMINSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMINSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMINSBZ256rr %ymm16, %ymm1
- %ymm16 = VPMINSBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMINSDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMINSDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMINSDZ256rr %ymm16, %ymm1
- %ymm16 = VPMINSDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMINSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMINSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMINSWZ256rr %ymm16, %ymm1
- %ymm16 = VPMINSWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMINUBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMINUBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMINUBZ256rr %ymm16, %ymm1
- %ymm16 = VPMINUBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMINUDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMINUDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMINUDZ256rr %ymm16, %ymm1
- %ymm16 = VPMINUDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMINUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMINUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMINUWZ256rr %ymm16, %ymm1
- %ymm16 = VPMINUWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMULDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMULDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMULDQZ256rr %ymm16, %ymm1
- %ymm16 = VPMULDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMULHRSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMULHRSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMULHRSWZ256rr %ymm16, %ymm1
- %ymm16 = VPMULHRSWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMULHUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMULHUWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMULHUWZ256rr %ymm16, %ymm1
- %ymm16 = VPMULHUWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMULHWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMULHWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMULHWZ256rr %ymm16, %ymm1
- %ymm16 = VPMULHWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMULLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMULLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMULLDZ256rr %ymm16, %ymm1
- %ymm16 = VPMULLDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMULLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMULLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMULLWZ256rr %ymm16, %ymm1
- %ymm16 = VPMULLWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPMULUDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMULUDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMULUDQZ256rr %ymm16, %ymm1
- %ymm16 = VPMULUDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPORDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPORDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPORDZ256rr %ymm16, %ymm1
- %ymm16 = VPORDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPORQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPORQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPORQZ256rr %ymm16, %ymm1
- %ymm16 = VPORQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSUBBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSUBBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSUBBZ256rr %ymm16, %ymm1
- %ymm16 = VPSUBBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSUBDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSUBDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSUBDZ256rr %ymm16, %ymm1
- %ymm16 = VPSUBDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSUBQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSUBQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSUBQZ256rr %ymm16, %ymm1
- %ymm16 = VPSUBQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSUBSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSUBSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSUBSBZ256rr %ymm16, %ymm1
- %ymm16 = VPSUBSBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSUBSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSUBSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSUBSWZ256rr %ymm16, %ymm1
- %ymm16 = VPSUBSWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSUBUSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSUBUSBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSUBUSBZ256rr %ymm16, %ymm1
- %ymm16 = VPSUBUSBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSUBUSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSUBUSWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSUBUSWZ256rr %ymm16, %ymm1
- %ymm16 = VPSUBUSWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSUBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSUBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSUBWZ256rr %ymm16, %ymm1
- %ymm16 = VPSUBWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPXORDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPXORDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPXORDZ256rr %ymm16, %ymm1
- %ymm16 = VPXORDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPXORQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPXORQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPXORQZ256rr %ymm16, %ymm1
- %ymm16 = VPXORQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VADDPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VADDPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VADDPDZ256rr %ymm16, %ymm1
- %ymm16 = VADDPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VADDPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VADDPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VADDPSZ256rr %ymm16, %ymm1
- %ymm16 = VADDPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VANDNPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VANDNPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VANDNPDZ256rr %ymm16, %ymm1
- %ymm16 = VANDNPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VANDNPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VANDNPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VANDNPSZ256rr %ymm16, %ymm1
- %ymm16 = VANDNPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VANDPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VANDPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VANDPDZ256rr %ymm16, %ymm1
- %ymm16 = VANDPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VANDPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VANDPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VANDPSZ256rr %ymm16, %ymm1
- %ymm16 = VANDPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VDIVPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VDIVPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VDIVPDZ256rr %ymm16, %ymm1
- %ymm16 = VDIVPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VDIVPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VDIVPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VDIVPSZ256rr %ymm16, %ymm1
- %ymm16 = VDIVPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMAXCPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMAXCPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMAXCPDZ256rr %ymm16, %ymm1
- %ymm16 = VMAXCPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMAXCPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMAXCPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMAXCPSZ256rr %ymm16, %ymm1
- %ymm16 = VMAXCPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMAXPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMAXPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMAXPDZ256rr %ymm16, %ymm1
- %ymm16 = VMAXPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMAXPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMAXPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMAXPSZ256rr %ymm16, %ymm1
- %ymm16 = VMAXPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMINCPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMINCPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMINCPDZ256rr %ymm16, %ymm1
- %ymm16 = VMINCPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMINCPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMINCPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMINCPSZ256rr %ymm16, %ymm1
- %ymm16 = VMINCPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMINPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMINPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMINPDZ256rr %ymm16, %ymm1
- %ymm16 = VMINPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VMINPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VMINPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VMINPSZ256rr %ymm16, %ymm1
- %ymm16 = VMINPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VXORPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VXORPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VXORPDZ256rr %ymm16, %ymm1
- %ymm16 = VXORPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VXORPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VXORPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VXORPSZ256rr %ymm16, %ymm1
- %ymm16 = VXORPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPACKSSDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPACKSSDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPACKSSDWZ256rr %ymm16, %ymm1
- %ymm16 = VPACKSSDWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPACKSSWBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPACKSSWBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPACKSSWBZ256rr %ymm16, %ymm1
- %ymm16 = VPACKSSWBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPACKUSDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPACKUSDWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPACKUSDWZ256rr %ymm16, %ymm1
- %ymm16 = VPACKUSDWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPACKUSWBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPACKUSWBZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPACKUSWBZ256rr %ymm16, %ymm1
- %ymm16 = VPACKUSWBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VUNPCKHPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VUNPCKHPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VUNPCKHPDZ256rr %ymm16, %ymm1
- %ymm16 = VUNPCKHPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VUNPCKHPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VUNPCKHPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VUNPCKHPSZ256rr %ymm16, %ymm1
- %ymm16 = VUNPCKHPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VUNPCKLPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VUNPCKLPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VUNPCKLPDZ256rr %ymm16, %ymm1
- %ymm16 = VUNPCKLPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VUNPCKLPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VUNPCKLPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VUNPCKLPSZ256rr %ymm16, %ymm1
- %ymm16 = VUNPCKLPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VSUBPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VSUBPDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VSUBPDZ256rr %ymm16, %ymm1
- %ymm16 = VSUBPDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VSUBPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VSUBPSZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VSUBPSZ256rr %ymm16, %ymm1
- %ymm16 = VSUBPSZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPUNPCKHBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPUNPCKHBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPUNPCKHBWZ256rr %ymm16, %ymm1
- %ymm16 = VPUNPCKHBWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPUNPCKHDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPUNPCKHDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPUNPCKHDQZ256rr %ymm16, %ymm1
- %ymm16 = VPUNPCKHDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPUNPCKHQDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPUNPCKHQDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPUNPCKHQDQZ256rr %ymm16, %ymm1
- %ymm16 = VPUNPCKHQDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPUNPCKHWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPUNPCKHWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPUNPCKHWDZ256rr %ymm16, %ymm1
- %ymm16 = VPUNPCKHWDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPUNPCKLBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPUNPCKLBWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPUNPCKLBWZ256rr %ymm16, %ymm1
- %ymm16 = VPUNPCKLBWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPUNPCKLDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPUNPCKLDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPUNPCKLDQZ256rr %ymm16, %ymm1
- %ymm16 = VPUNPCKLDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPUNPCKLQDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPUNPCKLQDQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPUNPCKLQDQZ256rr %ymm16, %ymm1
- %ymm16 = VPUNPCKLQDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPUNPCKLWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPUNPCKLWDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPUNPCKLWDZ256rr %ymm16, %ymm1
- %ymm16 = VPUNPCKLWDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VFMADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMADD132PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMADD132PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMADD132PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMADD132PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMADD213PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMADD213PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMADD213PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMADD213PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMADD231PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMADD231PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMADD231PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMADD231PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADDSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMADDSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMADDSUB132PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMADDSUB132PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADDSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMADDSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMADDSUB132PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMADDSUB132PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADDSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMADDSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMADDSUB213PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMADDSUB213PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADDSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMADDSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMADDSUB213PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMADDSUB213PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADDSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMADDSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMADDSUB231PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMADDSUB231PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMADDSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMADDSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMADDSUB231PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMADDSUB231PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMSUB132PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMSUB132PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMSUB132PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMSUB132PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMSUB213PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMSUB213PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMSUB213PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMSUB213PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMSUB231PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMSUB231PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMSUB231PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMSUB231PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUBADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMSUBADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMSUBADD132PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMSUBADD132PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUBADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMSUBADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMSUBADD132PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMSUBADD132PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUBADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMSUBADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMSUBADD213PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMSUBADD213PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUBADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMSUBADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMSUBADD213PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMSUBADD213PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUBADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMSUBADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMSUBADD231PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMSUBADD231PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFMSUBADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFMSUBADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFMSUBADD231PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFMSUBADD231PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFNMADD132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFNMADD132PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFNMADD132PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFNMADD132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFNMADD132PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFNMADD132PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFNMADD213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFNMADD213PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFNMADD213PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFNMADD213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFNMADD213PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFNMADD213PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFNMADD231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFNMADD231PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFNMADD231PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFNMADD231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFNMADD231PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFNMADD231PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFNMSUB132PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFNMSUB132PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFNMSUB132PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFNMSUB132PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFNMSUB132PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFNMSUB132PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFNMSUB213PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFNMSUB213PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFNMSUB213PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFNMSUB213PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFNMSUB213PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFNMSUB213PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFNMSUB231PDZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFNMSUB231PDZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFNMSUB231PDZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VFNMSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- %ymm16 = VFNMSUB231PSZ256m %ymm16, %ymm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VFNMSUB231PSZ256r %ymm16, %ymm1, %ymm2
- %ymm16 = VFNMSUB231PSZ256r %ymm16, %ymm1, %ymm2
- ; CHECK: %ymm16 = VPSRADZ256ri %ymm16, 7
- %ymm16 = VPSRADZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSRADZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSRADZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSRADZ256rr %ymm16, %xmm1
- %ymm16 = VPSRADZ256rr %ymm16, %xmm1
- ; CHECK: %ymm16 = VPSRAVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSRAVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSRAVDZ256rr %ymm16, %ymm1
- %ymm16 = VPSRAVDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSRAWZ256ri %ymm16, 7
- %ymm16 = VPSRAWZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSRAWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSRAWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSRAWZ256rr %ymm16, %xmm1
- %ymm16 = VPSRAWZ256rr %ymm16, %xmm1
- ; CHECK: %ymm16 = VPSRLDQZ256rr %ymm16, %ymm1
- %ymm16 = VPSRLDQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSRLDZ256ri %ymm16, 7
- %ymm16 = VPSRLDZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSRLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSRLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSRLDZ256rr %ymm16, %xmm1
- %ymm16 = VPSRLDZ256rr %ymm16, %xmm1
- ; CHECK: %ymm16 = VPSRLQZ256ri %ymm16, 7
- %ymm16 = VPSRLQZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSRLQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSRLQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSRLQZ256rr %ymm16, %xmm1
- %ymm16 = VPSRLQZ256rr %ymm16, %xmm1
- ; CHECK: %ymm16 = VPSRLVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSRLVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSRLVDZ256rr %ymm16, %ymm1
- %ymm16 = VPSRLVDZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSRLVQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSRLVQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSRLVQZ256rr %ymm16, %ymm1
- %ymm16 = VPSRLVQZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSRLWZ256ri %ymm16, 7
- %ymm16 = VPSRLWZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSRLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSRLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSRLWZ256rr %ymm16, %xmm1
- %ymm16 = VPSRLWZ256rr %ymm16, %xmm1
- ; CHECK: %ymm16 = VPMOVSXBDZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMOVSXBDZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMOVSXBDZ256rr %xmm0
- %ymm16 = VPMOVSXBDZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVSXBQZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMOVSXBQZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMOVSXBQZ256rr %xmm0
- %ymm16 = VPMOVSXBQZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVSXBWZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMOVSXBWZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMOVSXBWZ256rr %xmm0
- %ymm16 = VPMOVSXBWZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVSXDQZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMOVSXDQZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMOVSXDQZ256rr %xmm0
- %ymm16 = VPMOVSXDQZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVSXWDZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMOVSXWDZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMOVSXWDZ256rr %xmm0
- %ymm16 = VPMOVSXWDZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVSXWQZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMOVSXWQZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMOVSXWQZ256rr %xmm0
- %ymm16 = VPMOVSXWQZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVZXBDZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMOVZXBDZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMOVZXBDZ256rr %xmm0
- %ymm16 = VPMOVZXBDZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVZXBQZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMOVZXBQZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMOVZXBQZ256rr %xmm0
- %ymm16 = VPMOVZXBQZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVZXBWZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMOVZXBWZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMOVZXBWZ256rr %xmm0
- %ymm16 = VPMOVZXBWZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVZXDQZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMOVZXDQZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMOVZXDQZ256rr %xmm0
- %ymm16 = VPMOVZXDQZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVZXWDZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMOVZXWDZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMOVZXWDZ256rr %xmm0
- %ymm16 = VPMOVZXWDZ256rr %xmm0
- ; CHECK: %ymm16 = VPMOVZXWQZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPMOVZXWQZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPMOVZXWQZ256rr %xmm0
- %ymm16 = VPMOVZXWQZ256rr %xmm0
- ; CHECK: %ymm16 = VBROADCASTF32X2Z256m %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VBROADCASTF32X2Z256m %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VBROADCASTF32X2Z256r %xmm16
- %ymm16 = VBROADCASTF32X2Z256r %xmm16
- ; CHECK: %ymm16 = VBROADCASTF32X4Z256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VBROADCASTF32X4Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VBROADCASTSDZ256m %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VBROADCASTSDZ256m %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VBROADCASTSDZ256r %xmm0
- %ymm16 = VBROADCASTSDZ256r %xmm0
- ; CHECK: %ymm16 = VBROADCASTSSZ256m %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VBROADCASTSSZ256m %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VBROADCASTSSZ256r %xmm0
- %ymm16 = VBROADCASTSSZ256r %xmm0
- ; CHECK: %ymm16 = VPBROADCASTBZ256m %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPBROADCASTBZ256m %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPBROADCASTBZ256r %xmm0
- %ymm16 = VPBROADCASTBZ256r %xmm0
- ; CHECK: %ymm16 = VPBROADCASTDZ256m %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPBROADCASTDZ256m %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPBROADCASTDZ256r %xmm0
- %ymm16 = VPBROADCASTDZ256r %xmm0
- ; CHECK: %ymm16 = VPBROADCASTWZ256m %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPBROADCASTWZ256m %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPBROADCASTWZ256r %xmm0
- %ymm16 = VPBROADCASTWZ256r %xmm0
- ; CHECK: %ymm16 = VBROADCASTI32X4Z256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VBROADCASTI32X4Z256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VBROADCASTI32X2Z256m %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VBROADCASTI32X2Z256m %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VBROADCASTI32X2Z256r %xmm16
- %ymm16 = VBROADCASTI32X2Z256r %xmm16
- ; CHECK: %ymm16 = VPBROADCASTQZ256m %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPBROADCASTQZ256m %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPBROADCASTQZ256r %xmm0
- %ymm16 = VPBROADCASTQZ256r %xmm0
- ; CHECK: %ymm16 = VPABSBZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPABSBZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPABSBZ256rr %ymm16
- %ymm16 = VPABSBZ256rr %ymm16
- ; CHECK: %ymm16 = VPABSDZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPABSDZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPABSDZ256rr %ymm16
- %ymm16 = VPABSDZ256rr %ymm16
- ; CHECK: %ymm16 = VPABSWZ256rm %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPABSWZ256rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPABSWZ256rr %ymm16
- %ymm16 = VPABSWZ256rr %ymm16
- ; CHECK: %ymm16 = VPSADBWZ256rm %ymm16, 1, %noreg, %rax, %noreg, %noreg
- %ymm16 = VPSADBWZ256rm %ymm16, 1, %noreg, %rax, %noreg, %noreg
- ; CHECK: %ymm16 = VPSADBWZ256rr %ymm16, %ymm1
- %ymm16 = VPSADBWZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPERMDZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg
- %ymm16 = VPERMDZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VPERMDZ256rr %ymm1, %ymm16
- %ymm16 = VPERMDZ256rr %ymm1, %ymm16
- ; CHECK: %ymm16 = VPERMILPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %ymm16 = VPERMILPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %ymm16 = VPERMILPDZ256ri %ymm16, 7
- %ymm16 = VPERMILPDZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPERMILPDZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg
- %ymm16 = VPERMILPDZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VPERMILPDZ256rr %ymm1, %ymm16
- %ymm16 = VPERMILPDZ256rr %ymm1, %ymm16
- ; CHECK: %ymm16 = VPERMILPSZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %ymm16 = VPERMILPSZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %ymm16 = VPERMILPSZ256ri %ymm16, 7
- %ymm16 = VPERMILPSZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPERMILPSZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg
- %ymm16 = VPERMILPSZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VPERMILPSZ256rr %ymm1, %ymm16
- %ymm16 = VPERMILPSZ256rr %ymm1, %ymm16
- ; CHECK: %ymm16 = VPERMPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %ymm16 = VPERMPDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %ymm16 = VPERMPDZ256ri %ymm16, 7
- %ymm16 = VPERMPDZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPERMPSZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg
- %ymm16 = VPERMPSZ256rm %ymm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VPERMPSZ256rr %ymm1, %ymm16
- %ymm16 = VPERMPSZ256rr %ymm1, %ymm16
- ; CHECK: %ymm16 = VPERMQZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %ymm16 = VPERMQZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %ymm16 = VPERMQZ256ri %ymm16, 7
- %ymm16 = VPERMQZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSLLDQZ256rr %ymm16, 14
- %ymm16 = VPSLLDQZ256rr %ymm16, 14
- ; CHECK: %ymm16 = VPSLLDZ256ri %ymm16, 7
- %ymm16 = VPSLLDZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSLLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSLLDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSLLDZ256rr %ymm16, 14
- %ymm16 = VPSLLDZ256rr %ymm16, 14
- ; CHECK: %ymm16 = VPSLLQZ256ri %ymm16, 7
- %ymm16 = VPSLLQZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSLLQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSLLQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSLLQZ256rr %ymm16, 14
- %ymm16 = VPSLLQZ256rr %ymm16, 14
- ; CHECK: %ymm16 = VPSLLVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSLLVDZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSLLVDZ256rr %ymm16, 14
- %ymm16 = VPSLLVDZ256rr %ymm16, 14
- ; CHECK: %ymm16 = VPSLLVQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSLLVQZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSLLVQZ256rr %ymm16, 14
- %ymm16 = VPSLLVQZ256rr %ymm16, 14
- ; CHECK: %ymm16 = VPSLLWZ256ri %ymm16, 7
- %ymm16 = VPSLLWZ256ri %ymm16, 7
- ; CHECK: %ymm16 = VPSLLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- %ymm16 = VPSLLWZ256rm %ymm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %ymm16 = VPSLLWZ256rr %ymm16, 14
- %ymm16 = VPSLLWZ256rr %ymm16, 14
- ; CHECK: %ymm16 = VCVTDQ2PDZ256rm %rdi, %ymm16, 1, %noreg, 0
- %ymm16 = VCVTDQ2PDZ256rm %rdi, %ymm16, 1, %noreg, 0
- ; CHECK: %ymm16 = VCVTDQ2PDZ256rr %xmm0
- %ymm16 = VCVTDQ2PDZ256rr %xmm0
- ; CHECK: %ymm16 = VCVTDQ2PSZ256rm %rdi, %ymm16, 1, %noreg, 0
- %ymm16 = VCVTDQ2PSZ256rm %rdi, %ymm16, 1, %noreg, 0
- ; CHECK: %ymm16 = VCVTDQ2PSZ256rr %ymm16
- %ymm16 = VCVTDQ2PSZ256rr %ymm16
- ; CHECK: %xmm0 = VCVTPD2DQZ256rm %rdi, %ymm16, 1, %noreg, 0
- %xmm0 = VCVTPD2DQZ256rm %rdi, %ymm16, 1, %noreg, 0
- ; CHECK: %xmm0 = VCVTPD2DQZ256rr %ymm16
- %xmm0 = VCVTPD2DQZ256rr %ymm16
- ; CHECK: %xmm0 = VCVTPD2PSZ256rm %rdi, %ymm16, 1, %noreg, 0
- %xmm0 = VCVTPD2PSZ256rm %rdi, %ymm16, 1, %noreg, 0
- ; CHECK: %xmm0 = VCVTPD2PSZ256rr %ymm16
- %xmm0 = VCVTPD2PSZ256rr %ymm16
- ; CHECK: %ymm16 = VCVTPS2DQZ256rm %rdi, %ymm16, 1, %noreg, 0
- %ymm16 = VCVTPS2DQZ256rm %rdi, %ymm16, 1, %noreg, 0
- ; CHECK: %ymm16 = VCVTPS2DQZ256rr %ymm16
- %ymm16 = VCVTPS2DQZ256rr %ymm16
- ; CHECK: %ymm16 = VCVTPS2PDZ256rm %rdi, %ymm16, 1, %noreg, 0
- %ymm16 = VCVTPS2PDZ256rm %rdi, %ymm16, 1, %noreg, 0
- ; CHECK: %ymm16 = VCVTPS2PDZ256rr %xmm0
- %ymm16 = VCVTPS2PDZ256rr %xmm0
- ; CHECK: VCVTPS2PHZ256mr %rdi, %ymm16, 1, %noreg, 0, %noreg, %noreg
- VCVTPS2PHZ256mr %rdi, %ymm16, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm0 = VCVTPS2PHZ256rr %ymm16, %noreg
- %xmm0 = VCVTPS2PHZ256rr %ymm16, %noreg
- ; CHECK: %ymm16 = VCVTPH2PSZ256rm %rdi, %ymm16, 1, %noreg, 0
- %ymm16 = VCVTPH2PSZ256rm %rdi, %ymm16, 1, %noreg, 0
- ; CHECK: %ymm16 = VCVTPH2PSZ256rr %xmm0
- %ymm16 = VCVTPH2PSZ256rr %xmm0
- ; CHECK: %xmm0 = VCVTTPD2DQZ256rm %rdi, %ymm16, 1, %noreg, 0
- %xmm0 = VCVTTPD2DQZ256rm %rdi, %ymm16, 1, %noreg, 0
- ; CHECK: %xmm0 = VCVTTPD2DQZ256rr %ymm16
- %xmm0 = VCVTTPD2DQZ256rr %ymm16
- ; CHECK: %ymm16 = VCVTTPS2DQZ256rm %rdi, %ymm16, 1, %noreg, 0
- %ymm16 = VCVTTPS2DQZ256rm %rdi, %ymm16, 1, %noreg, 0
- ; CHECK: %ymm16 = VCVTTPS2DQZ256rr %ymm16
- %ymm16 = VCVTTPS2DQZ256rr %ymm16
- ; CHECK: %ymm16 = VSQRTPDZ256m %rdi, %noreg, %noreg, %noreg, %noreg
- %ymm16 = VSQRTPDZ256m %rdi, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %ymm16 = VSQRTPDZ256r %ymm16
- %ymm16 = VSQRTPDZ256r %ymm16
- ; CHECK: %ymm16 = VSQRTPSZ256m %rdi, %noreg, %noreg, %noreg, %noreg
- %ymm16 = VSQRTPSZ256m %rdi, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %ymm16 = VSQRTPSZ256r %ymm16
- %ymm16 = VSQRTPSZ256r %ymm16
- ; CHECK: %ymm16 = VPALIGNRZ256rmi %ymm16, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
- %ymm16 = VPALIGNRZ256rmi %ymm16, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %ymm16 = VPALIGNRZ256rri %ymm16, %ymm1, %noreg
- %ymm16 = VPALIGNRZ256rri %ymm16, %ymm1, %noreg
- ; CHECK: %ymm16 = VMOVUPSZ256rm %rdi, 1, %noreg, 0, %noreg
- %ymm16 = VMOVUPSZ256rm %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %ymm16 = VMOVUPSZ256rr %ymm16
- %ymm16 = VMOVUPSZ256rr %ymm16
- ; CHECK: %ymm16 = VMOVUPSZ256rr_REV %ymm16
- %ymm16 = VMOVUPSZ256rr_REV %ymm16
- ; CHECK: %ymm16 = VPSHUFBZ256rm %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg
- %ymm16 = VPSHUFBZ256rm %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %ymm16 = VPSHUFBZ256rr %ymm16, %ymm1
- %ymm16 = VPSHUFBZ256rr %ymm16, %ymm1
- ; CHECK: %ymm16 = VPSHUFDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %ymm16 = VPSHUFDZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %ymm16 = VPSHUFDZ256ri %ymm16, -24
- %ymm16 = VPSHUFDZ256ri %ymm16, -24
- ; CHECK: %ymm16 = VPSHUFHWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %ymm16 = VPSHUFHWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %ymm16 = VPSHUFHWZ256ri %ymm16, -24
- %ymm16 = VPSHUFHWZ256ri %ymm16, -24
- ; CHECK: %ymm16 = VPSHUFLWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %ymm16 = VPSHUFLWZ256mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %ymm16 = VPSHUFLWZ256ri %ymm16, -24
- %ymm16 = VPSHUFLWZ256ri %ymm16, -24
- ; CHECK: %ymm16 = VSHUFPDZ256rmi %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- %ymm16 = VSHUFPDZ256rmi %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %ymm16 = VSHUFPDZ256rri %ymm16, %noreg, %noreg
- %ymm16 = VSHUFPDZ256rri %ymm16, %noreg, %noreg
- ; CHECK: %ymm16 = VSHUFPSZ256rmi %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- %ymm16 = VSHUFPSZ256rmi %ymm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %ymm16 = VSHUFPSZ256rri %ymm16, %noreg, %noreg
- %ymm16 = VSHUFPSZ256rri %ymm16, %noreg, %noreg
+ ; CHECK: VMOVAPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ VMOVAPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ ; CHECK: $ymm16 = VMOVAPDZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMOVAPDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMOVAPDZ256rr $ymm16
+ $ymm16 = VMOVAPDZ256rr $ymm16
+ ; CHECK: $ymm16 = VMOVAPDZ256rr_REV $ymm16
+ $ymm16 = VMOVAPDZ256rr_REV $ymm16
+ ; CHECK: VMOVAPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ VMOVAPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ ; CHECK: $ymm16 = VMOVAPSZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMOVAPSZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMOVAPSZ256rr $ymm16
+ $ymm16 = VMOVAPSZ256rr $ymm16
+ ; CHECK: $ymm16 = VMOVAPSZ256rr_REV $ymm16
+ $ymm16 = VMOVAPSZ256rr_REV $ymm16
+ ; CHECK: $ymm16 = VMOVDDUPZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMOVDDUPZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMOVDDUPZ256rr $ymm16
+ $ymm16 = VMOVDDUPZ256rr $ymm16
+ ; CHECK: VMOVDQA32Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ VMOVDQA32Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ ; CHECK: $ymm16 = VMOVDQA32Z256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMOVDQA32Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMOVDQA32Z256rr $ymm16
+ $ymm16 = VMOVDQA32Z256rr $ymm16
+ ; CHECK: $ymm16 = VMOVDQA32Z256rr_REV $ymm16
+ $ymm16 = VMOVDQA32Z256rr_REV $ymm16
+ ; CHECK: VMOVDQA64Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ VMOVDQA64Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ ; CHECK: $ymm16 = VMOVDQA64Z256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMOVDQA64Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMOVDQA64Z256rr $ymm16
+ $ymm16 = VMOVDQA64Z256rr $ymm16
+ ; CHECK: $ymm16 = VMOVDQA64Z256rr_REV $ymm16
+ $ymm16 = VMOVDQA64Z256rr_REV $ymm16
+ ; CHECK: VMOVDQU16Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ VMOVDQU16Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ ; CHECK: $ymm16 = VMOVDQU16Z256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMOVDQU16Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMOVDQU16Z256rr $ymm16
+ $ymm16 = VMOVDQU16Z256rr $ymm16
+ ; CHECK: $ymm16 = VMOVDQU16Z256rr_REV $ymm16
+ $ymm16 = VMOVDQU16Z256rr_REV $ymm16
+ ; CHECK: VMOVDQU32Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ VMOVDQU32Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ ; CHECK: $ymm16 = VMOVDQU32Z256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMOVDQU32Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMOVDQU32Z256rr $ymm16
+ $ymm16 = VMOVDQU32Z256rr $ymm16
+ ; CHECK: $ymm16 = VMOVDQU32Z256rr_REV $ymm16
+ $ymm16 = VMOVDQU32Z256rr_REV $ymm16
+ ; CHECK: VMOVDQU64Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ VMOVDQU64Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ ; CHECK: $ymm16 = VMOVDQU64Z256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMOVDQU64Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMOVDQU64Z256rr $ymm16
+ $ymm16 = VMOVDQU64Z256rr $ymm16
+ ; CHECK: $ymm16 = VMOVDQU64Z256rr_REV $ymm16
+ $ymm16 = VMOVDQU64Z256rr_REV $ymm16
+ ; CHECK: VMOVDQU8Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ VMOVDQU8Z256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ ; CHECK: $ymm16 = VMOVDQU8Z256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMOVDQU8Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMOVDQU8Z256rr $ymm16
+ $ymm16 = VMOVDQU8Z256rr $ymm16
+ ; CHECK: $ymm16 = VMOVDQU8Z256rr_REV $ymm16
+ $ymm16 = VMOVDQU8Z256rr_REV $ymm16
+ ; CHECK: $ymm16 = VMOVNTDQAZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMOVNTDQAZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: VMOVNTDQZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ VMOVNTDQZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ ; CHECK: VMOVNTPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ VMOVNTPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ ; CHECK: VMOVNTPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ VMOVNTPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ ; CHECK: $ymm16 = VMOVSHDUPZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMOVSHDUPZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMOVSHDUPZ256rr $ymm16
+ $ymm16 = VMOVSHDUPZ256rr $ymm16
+ ; CHECK: $ymm16 = VMOVSLDUPZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMOVSLDUPZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMOVSLDUPZ256rr $ymm16
+ $ymm16 = VMOVSLDUPZ256rr $ymm16
+ ; CHECK: VMOVUPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ VMOVUPDZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ ; CHECK: $ymm16 = VMOVUPDZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMOVUPDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMOVUPDZ256rr $ymm16
+ $ymm16 = VMOVUPDZ256rr $ymm16
+ ; CHECK: $ymm16 = VMOVUPDZ256rr_REV $ymm16
+ $ymm16 = VMOVUPDZ256rr_REV $ymm16
+ ; CHECK: VMOVUPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ VMOVUPSZ256mr $rdi, 1, $noreg, 0, $noreg, $ymm16
+ ; CHECK: $ymm16 = VPANDDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPANDDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPANDDZ256rr $ymm16, $ymm1
+ $ymm16 = VPANDDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPANDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPANDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPANDQZ256rr $ymm16, $ymm1
+ $ymm16 = VPANDQZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPANDNDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPANDNDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPANDNDZ256rr $ymm16, $ymm1
+ $ymm16 = VPANDNDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPANDNQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPANDNQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPANDNQZ256rr $ymm16, $ymm1
+ $ymm16 = VPANDNQZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPAVGBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPAVGBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPAVGBZ256rr $ymm16, $ymm1
+ $ymm16 = VPAVGBZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPAVGWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPAVGWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPAVGWZ256rr $ymm16, $ymm1
+ $ymm16 = VPAVGWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPADDBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPADDBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPADDBZ256rr $ymm16, $ymm1
+ $ymm16 = VPADDBZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPADDDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPADDDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPADDDZ256rr $ymm16, $ymm1
+ $ymm16 = VPADDDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPADDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPADDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPADDQZ256rr $ymm16, $ymm1
+ $ymm16 = VPADDQZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPADDSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPADDSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPADDSBZ256rr $ymm16, $ymm1
+ $ymm16 = VPADDSBZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPADDSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPADDSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPADDSWZ256rr $ymm16, $ymm1
+ $ymm16 = VPADDSWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPADDUSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPADDUSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPADDUSBZ256rr $ymm16, $ymm1
+ $ymm16 = VPADDUSBZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPADDUSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPADDUSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPADDUSWZ256rr $ymm16, $ymm1
+ $ymm16 = VPADDUSWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPADDWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPADDWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPADDWZ256rr $ymm16, $ymm1
+ $ymm16 = VPADDWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VMULPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMULPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMULPDZ256rr $ymm16, $ymm1
+ $ymm16 = VMULPDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VMULPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMULPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMULPSZ256rr $ymm16, $ymm1
+ $ymm16 = VMULPSZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VORPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VORPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VORPDZ256rr $ymm16, $ymm1
+ $ymm16 = VORPDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VORPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VORPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VORPSZ256rr $ymm16, $ymm1
+ $ymm16 = VORPSZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMADDUBSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMADDUBSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMADDUBSWZ256rr $ymm16, $ymm1
+ $ymm16 = VPMADDUBSWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMADDWDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMADDWDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMADDWDZ256rr $ymm16, $ymm1
+ $ymm16 = VPMADDWDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMAXSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMAXSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMAXSBZ256rr $ymm16, $ymm1
+ $ymm16 = VPMAXSBZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMAXSDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMAXSDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMAXSDZ256rr $ymm16, $ymm1
+ $ymm16 = VPMAXSDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMAXSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMAXSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMAXSWZ256rr $ymm16, $ymm1
+ $ymm16 = VPMAXSWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMAXUBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMAXUBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMAXUBZ256rr $ymm16, $ymm1
+ $ymm16 = VPMAXUBZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMAXUDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMAXUDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMAXUDZ256rr $ymm16, $ymm1
+ $ymm16 = VPMAXUDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMAXUWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMAXUWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMAXUWZ256rr $ymm16, $ymm1
+ $ymm16 = VPMAXUWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMINSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMINSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMINSBZ256rr $ymm16, $ymm1
+ $ymm16 = VPMINSBZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMINSDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMINSDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMINSDZ256rr $ymm16, $ymm1
+ $ymm16 = VPMINSDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMINSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMINSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMINSWZ256rr $ymm16, $ymm1
+ $ymm16 = VPMINSWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMINUBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMINUBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMINUBZ256rr $ymm16, $ymm1
+ $ymm16 = VPMINUBZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMINUDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMINUDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMINUDZ256rr $ymm16, $ymm1
+ $ymm16 = VPMINUDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMINUWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMINUWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMINUWZ256rr $ymm16, $ymm1
+ $ymm16 = VPMINUWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMULDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMULDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMULDQZ256rr $ymm16, $ymm1
+ $ymm16 = VPMULDQZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMULHRSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMULHRSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMULHRSWZ256rr $ymm16, $ymm1
+ $ymm16 = VPMULHRSWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMULHUWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMULHUWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMULHUWZ256rr $ymm16, $ymm1
+ $ymm16 = VPMULHUWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMULHWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMULHWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMULHWZ256rr $ymm16, $ymm1
+ $ymm16 = VPMULHWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMULLDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMULLDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMULLDZ256rr $ymm16, $ymm1
+ $ymm16 = VPMULLDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMULLWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMULLWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMULLWZ256rr $ymm16, $ymm1
+ $ymm16 = VPMULLWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPMULUDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMULUDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMULUDQZ256rr $ymm16, $ymm1
+ $ymm16 = VPMULUDQZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPORDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPORDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPORDZ256rr $ymm16, $ymm1
+ $ymm16 = VPORDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPORQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPORQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPORQZ256rr $ymm16, $ymm1
+ $ymm16 = VPORQZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPSUBBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSUBBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSUBBZ256rr $ymm16, $ymm1
+ $ymm16 = VPSUBBZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPSUBDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSUBDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSUBDZ256rr $ymm16, $ymm1
+ $ymm16 = VPSUBDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPSUBQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSUBQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSUBQZ256rr $ymm16, $ymm1
+ $ymm16 = VPSUBQZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPSUBSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSUBSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSUBSBZ256rr $ymm16, $ymm1
+ $ymm16 = VPSUBSBZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPSUBSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSUBSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSUBSWZ256rr $ymm16, $ymm1
+ $ymm16 = VPSUBSWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPSUBUSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSUBUSBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSUBUSBZ256rr $ymm16, $ymm1
+ $ymm16 = VPSUBUSBZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPSUBUSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSUBUSWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSUBUSWZ256rr $ymm16, $ymm1
+ $ymm16 = VPSUBUSWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPSUBWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSUBWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSUBWZ256rr $ymm16, $ymm1
+ $ymm16 = VPSUBWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPXORDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPXORDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPXORDZ256rr $ymm16, $ymm1
+ $ymm16 = VPXORDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPXORQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPXORQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPXORQZ256rr $ymm16, $ymm1
+ $ymm16 = VPXORQZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VADDPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VADDPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VADDPDZ256rr $ymm16, $ymm1
+ $ymm16 = VADDPDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VADDPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VADDPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VADDPSZ256rr $ymm16, $ymm1
+ $ymm16 = VADDPSZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VANDNPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VANDNPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VANDNPDZ256rr $ymm16, $ymm1
+ $ymm16 = VANDNPDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VANDNPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VANDNPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VANDNPSZ256rr $ymm16, $ymm1
+ $ymm16 = VANDNPSZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VANDPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VANDPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VANDPDZ256rr $ymm16, $ymm1
+ $ymm16 = VANDPDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VANDPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VANDPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VANDPSZ256rr $ymm16, $ymm1
+ $ymm16 = VANDPSZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VDIVPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VDIVPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VDIVPDZ256rr $ymm16, $ymm1
+ $ymm16 = VDIVPDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VDIVPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VDIVPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VDIVPSZ256rr $ymm16, $ymm1
+ $ymm16 = VDIVPSZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VMAXCPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMAXCPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMAXCPDZ256rr $ymm16, $ymm1
+ $ymm16 = VMAXCPDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VMAXCPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMAXCPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMAXCPSZ256rr $ymm16, $ymm1
+ $ymm16 = VMAXCPSZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VMAXPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMAXPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMAXPDZ256rr $ymm16, $ymm1
+ $ymm16 = VMAXPDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VMAXPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMAXPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMAXPSZ256rr $ymm16, $ymm1
+ $ymm16 = VMAXPSZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VMINCPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMINCPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMINCPDZ256rr $ymm16, $ymm1
+ $ymm16 = VMINCPDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VMINCPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMINCPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMINCPSZ256rr $ymm16, $ymm1
+ $ymm16 = VMINCPSZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VMINPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMINPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMINPDZ256rr $ymm16, $ymm1
+ $ymm16 = VMINPDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VMINPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VMINPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VMINPSZ256rr $ymm16, $ymm1
+ $ymm16 = VMINPSZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VXORPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VXORPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VXORPDZ256rr $ymm16, $ymm1
+ $ymm16 = VXORPDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VXORPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VXORPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VXORPSZ256rr $ymm16, $ymm1
+ $ymm16 = VXORPSZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPACKSSDWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPACKSSDWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPACKSSDWZ256rr $ymm16, $ymm1
+ $ymm16 = VPACKSSDWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPACKSSWBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPACKSSWBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPACKSSWBZ256rr $ymm16, $ymm1
+ $ymm16 = VPACKSSWBZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPACKUSDWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPACKUSDWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPACKUSDWZ256rr $ymm16, $ymm1
+ $ymm16 = VPACKUSDWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPACKUSWBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPACKUSWBZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPACKUSWBZ256rr $ymm16, $ymm1
+ $ymm16 = VPACKUSWBZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VUNPCKHPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VUNPCKHPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VUNPCKHPDZ256rr $ymm16, $ymm1
+ $ymm16 = VUNPCKHPDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VUNPCKHPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VUNPCKHPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VUNPCKHPSZ256rr $ymm16, $ymm1
+ $ymm16 = VUNPCKHPSZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VUNPCKLPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VUNPCKLPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VUNPCKLPDZ256rr $ymm16, $ymm1
+ $ymm16 = VUNPCKLPDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VUNPCKLPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VUNPCKLPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VUNPCKLPSZ256rr $ymm16, $ymm1
+ $ymm16 = VUNPCKLPSZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VSUBPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VSUBPDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VSUBPDZ256rr $ymm16, $ymm1
+ $ymm16 = VSUBPDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VSUBPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VSUBPSZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VSUBPSZ256rr $ymm16, $ymm1
+ $ymm16 = VSUBPSZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPUNPCKHBWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPUNPCKHBWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPUNPCKHBWZ256rr $ymm16, $ymm1
+ $ymm16 = VPUNPCKHBWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPUNPCKHDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPUNPCKHDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPUNPCKHDQZ256rr $ymm16, $ymm1
+ $ymm16 = VPUNPCKHDQZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPUNPCKHQDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPUNPCKHQDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPUNPCKHQDQZ256rr $ymm16, $ymm1
+ $ymm16 = VPUNPCKHQDQZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPUNPCKHWDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPUNPCKHWDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPUNPCKHWDZ256rr $ymm16, $ymm1
+ $ymm16 = VPUNPCKHWDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPUNPCKLBWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPUNPCKLBWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPUNPCKLBWZ256rr $ymm16, $ymm1
+ $ymm16 = VPUNPCKLBWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPUNPCKLDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPUNPCKLDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPUNPCKLDQZ256rr $ymm16, $ymm1
+ $ymm16 = VPUNPCKLDQZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPUNPCKLQDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPUNPCKLQDQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPUNPCKLQDQZ256rr $ymm16, $ymm1
+ $ymm16 = VPUNPCKLQDQZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPUNPCKLWDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPUNPCKLWDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPUNPCKLWDZ256rr $ymm16, $ymm1
+ $ymm16 = VPUNPCKLWDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VFMADD132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMADD132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMADD132PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMADD132PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMADD132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMADD132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMADD132PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMADD132PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMADD213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMADD213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMADD213PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMADD213PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMADD213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMADD213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMADD213PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMADD213PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMADD231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMADD231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMADD231PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMADD231PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMADD231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMADD231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMADD231PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMADD231PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMADDSUB132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMADDSUB132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMADDSUB132PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMADDSUB132PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMADDSUB132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMADDSUB132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMADDSUB132PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMADDSUB132PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMADDSUB213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMADDSUB213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMADDSUB213PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMADDSUB213PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMADDSUB213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMADDSUB213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMADDSUB213PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMADDSUB213PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMADDSUB231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMADDSUB231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMADDSUB231PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMADDSUB231PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMADDSUB231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMADDSUB231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMADDSUB231PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMADDSUB231PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMSUB132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMSUB132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMSUB132PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMSUB132PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMSUB132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMSUB132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMSUB132PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMSUB132PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMSUB213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMSUB213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMSUB213PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMSUB213PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMSUB213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMSUB213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMSUB213PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMSUB213PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMSUB231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMSUB231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMSUB231PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMSUB231PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMSUB231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMSUB231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMSUB231PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMSUB231PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMSUBADD132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMSUBADD132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMSUBADD132PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMSUBADD132PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMSUBADD132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMSUBADD132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMSUBADD132PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMSUBADD132PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMSUBADD213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMSUBADD213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMSUBADD213PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMSUBADD213PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMSUBADD213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMSUBADD213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMSUBADD213PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMSUBADD213PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMSUBADD231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMSUBADD231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMSUBADD231PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMSUBADD231PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFMSUBADD231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFMSUBADD231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFMSUBADD231PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFMSUBADD231PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFNMADD132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFNMADD132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFNMADD132PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFNMADD132PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFNMADD132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFNMADD132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFNMADD132PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFNMADD132PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFNMADD213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFNMADD213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFNMADD213PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFNMADD213PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFNMADD213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFNMADD213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFNMADD213PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFNMADD213PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFNMADD231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFNMADD231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFNMADD231PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFNMADD231PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFNMADD231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFNMADD231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFNMADD231PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFNMADD231PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFNMSUB132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFNMSUB132PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFNMSUB132PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFNMSUB132PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFNMSUB132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFNMSUB132PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFNMSUB132PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFNMSUB132PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFNMSUB213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFNMSUB213PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFNMSUB213PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFNMSUB213PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFNMSUB213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFNMSUB213PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFNMSUB213PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFNMSUB213PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFNMSUB231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFNMSUB231PDZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFNMSUB231PDZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFNMSUB231PDZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VFNMSUB231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ $ymm16 = VFNMSUB231PSZ256m $ymm16, $ymm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VFNMSUB231PSZ256r $ymm16, $ymm1, $ymm2
+ $ymm16 = VFNMSUB231PSZ256r $ymm16, $ymm1, $ymm2
+ ; CHECK: $ymm16 = VPSRADZ256ri $ymm16, 7
+ $ymm16 = VPSRADZ256ri $ymm16, 7
+ ; CHECK: $ymm16 = VPSRADZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSRADZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSRADZ256rr $ymm16, $xmm1
+ $ymm16 = VPSRADZ256rr $ymm16, $xmm1
+ ; CHECK: $ymm16 = VPSRAVDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSRAVDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSRAVDZ256rr $ymm16, $ymm1
+ $ymm16 = VPSRAVDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPSRAWZ256ri $ymm16, 7
+ $ymm16 = VPSRAWZ256ri $ymm16, 7
+ ; CHECK: $ymm16 = VPSRAWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSRAWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSRAWZ256rr $ymm16, $xmm1
+ $ymm16 = VPSRAWZ256rr $ymm16, $xmm1
+ ; CHECK: $ymm16 = VPSRLDQZ256rr $ymm16, $ymm1
+ $ymm16 = VPSRLDQZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPSRLDZ256ri $ymm16, 7
+ $ymm16 = VPSRLDZ256ri $ymm16, 7
+ ; CHECK: $ymm16 = VPSRLDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSRLDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSRLDZ256rr $ymm16, $xmm1
+ $ymm16 = VPSRLDZ256rr $ymm16, $xmm1
+ ; CHECK: $ymm16 = VPSRLQZ256ri $ymm16, 7
+ $ymm16 = VPSRLQZ256ri $ymm16, 7
+ ; CHECK: $ymm16 = VPSRLQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSRLQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSRLQZ256rr $ymm16, $xmm1
+ $ymm16 = VPSRLQZ256rr $ymm16, $xmm1
+ ; CHECK: $ymm16 = VPSRLVDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSRLVDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSRLVDZ256rr $ymm16, $ymm1
+ $ymm16 = VPSRLVDZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPSRLVQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSRLVQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSRLVQZ256rr $ymm16, $ymm1
+ $ymm16 = VPSRLVQZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPSRLWZ256ri $ymm16, 7
+ $ymm16 = VPSRLWZ256ri $ymm16, 7
+ ; CHECK: $ymm16 = VPSRLWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSRLWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSRLWZ256rr $ymm16, $xmm1
+ $ymm16 = VPSRLWZ256rr $ymm16, $xmm1
+ ; CHECK: $ymm16 = VPMOVSXBDZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMOVSXBDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMOVSXBDZ256rr $xmm0
+ $ymm16 = VPMOVSXBDZ256rr $xmm0
+ ; CHECK: $ymm16 = VPMOVSXBQZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMOVSXBQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMOVSXBQZ256rr $xmm0
+ $ymm16 = VPMOVSXBQZ256rr $xmm0
+ ; CHECK: $ymm16 = VPMOVSXBWZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMOVSXBWZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMOVSXBWZ256rr $xmm0
+ $ymm16 = VPMOVSXBWZ256rr $xmm0
+ ; CHECK: $ymm16 = VPMOVSXDQZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMOVSXDQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMOVSXDQZ256rr $xmm0
+ $ymm16 = VPMOVSXDQZ256rr $xmm0
+ ; CHECK: $ymm16 = VPMOVSXWDZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMOVSXWDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMOVSXWDZ256rr $xmm0
+ $ymm16 = VPMOVSXWDZ256rr $xmm0
+ ; CHECK: $ymm16 = VPMOVSXWQZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMOVSXWQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMOVSXWQZ256rr $xmm0
+ $ymm16 = VPMOVSXWQZ256rr $xmm0
+ ; CHECK: $ymm16 = VPMOVZXBDZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMOVZXBDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMOVZXBDZ256rr $xmm0
+ $ymm16 = VPMOVZXBDZ256rr $xmm0
+ ; CHECK: $ymm16 = VPMOVZXBQZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMOVZXBQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMOVZXBQZ256rr $xmm0
+ $ymm16 = VPMOVZXBQZ256rr $xmm0
+ ; CHECK: $ymm16 = VPMOVZXBWZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMOVZXBWZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMOVZXBWZ256rr $xmm0
+ $ymm16 = VPMOVZXBWZ256rr $xmm0
+ ; CHECK: $ymm16 = VPMOVZXDQZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMOVZXDQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMOVZXDQZ256rr $xmm0
+ $ymm16 = VPMOVZXDQZ256rr $xmm0
+ ; CHECK: $ymm16 = VPMOVZXWDZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMOVZXWDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMOVZXWDZ256rr $xmm0
+ $ymm16 = VPMOVZXWDZ256rr $xmm0
+ ; CHECK: $ymm16 = VPMOVZXWQZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPMOVZXWQZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPMOVZXWQZ256rr $xmm0
+ $ymm16 = VPMOVZXWQZ256rr $xmm0
+ ; CHECK: $ymm16 = VBROADCASTF32X2Z256m $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VBROADCASTF32X2Z256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VBROADCASTF32X2Z256r $xmm16
+ $ymm16 = VBROADCASTF32X2Z256r $xmm16
+ ; CHECK: $ymm16 = VBROADCASTF32X4Z256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VBROADCASTF32X4Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VBROADCASTSDZ256m $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VBROADCASTSDZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VBROADCASTSDZ256r $xmm0
+ $ymm16 = VBROADCASTSDZ256r $xmm0
+ ; CHECK: $ymm16 = VBROADCASTSSZ256m $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VBROADCASTSSZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VBROADCASTSSZ256r $xmm0
+ $ymm16 = VBROADCASTSSZ256r $xmm0
+ ; CHECK: $ymm16 = VPBROADCASTBZ256m $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPBROADCASTBZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPBROADCASTBZ256r $xmm0
+ $ymm16 = VPBROADCASTBZ256r $xmm0
+ ; CHECK: $ymm16 = VPBROADCASTDZ256m $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPBROADCASTDZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPBROADCASTDZ256r $xmm0
+ $ymm16 = VPBROADCASTDZ256r $xmm0
+ ; CHECK: $ymm16 = VPBROADCASTWZ256m $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPBROADCASTWZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPBROADCASTWZ256r $xmm0
+ $ymm16 = VPBROADCASTWZ256r $xmm0
+ ; CHECK: $ymm16 = VBROADCASTI32X4Z256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VBROADCASTI32X4Z256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VBROADCASTI32X2Z256m $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VBROADCASTI32X2Z256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VBROADCASTI32X2Z256r $xmm16
+ $ymm16 = VBROADCASTI32X2Z256r $xmm16
+ ; CHECK: $ymm16 = VPBROADCASTQZ256m $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPBROADCASTQZ256m $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPBROADCASTQZ256r $xmm0
+ $ymm16 = VPBROADCASTQZ256r $xmm0
+ ; CHECK: $ymm16 = VPABSBZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPABSBZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPABSBZ256rr $ymm16
+ $ymm16 = VPABSBZ256rr $ymm16
+ ; CHECK: $ymm16 = VPABSDZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPABSDZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPABSDZ256rr $ymm16
+ $ymm16 = VPABSDZ256rr $ymm16
+ ; CHECK: $ymm16 = VPABSWZ256rm $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPABSWZ256rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPABSWZ256rr $ymm16
+ $ymm16 = VPABSWZ256rr $ymm16
+ ; CHECK: $ymm16 = VPSADBWZ256rm $ymm16, 1, $noreg, $rax, $noreg, $noreg
+ $ymm16 = VPSADBWZ256rm $ymm16, 1, $noreg, $rax, $noreg, $noreg
+ ; CHECK: $ymm16 = VPSADBWZ256rr $ymm16, $ymm1
+ $ymm16 = VPSADBWZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPERMDZ256rm $ymm16, $rdi, 1, $noreg, 0, $noreg
+ $ymm16 = VPERMDZ256rm $ymm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VPERMDZ256rr $ymm1, $ymm16
+ $ymm16 = VPERMDZ256rr $ymm1, $ymm16
+ ; CHECK: $ymm16 = VPERMILPDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm16 = VPERMILPDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm16 = VPERMILPDZ256ri $ymm16, 7
+ $ymm16 = VPERMILPDZ256ri $ymm16, 7
+ ; CHECK: $ymm16 = VPERMILPDZ256rm $ymm16, $rdi, 1, $noreg, 0, $noreg
+ $ymm16 = VPERMILPDZ256rm $ymm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VPERMILPDZ256rr $ymm1, $ymm16
+ $ymm16 = VPERMILPDZ256rr $ymm1, $ymm16
+ ; CHECK: $ymm16 = VPERMILPSZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm16 = VPERMILPSZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm16 = VPERMILPSZ256ri $ymm16, 7
+ $ymm16 = VPERMILPSZ256ri $ymm16, 7
+ ; CHECK: $ymm16 = VPERMILPSZ256rm $ymm16, $rdi, 1, $noreg, 0, $noreg
+ $ymm16 = VPERMILPSZ256rm $ymm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VPERMILPSZ256rr $ymm1, $ymm16
+ $ymm16 = VPERMILPSZ256rr $ymm1, $ymm16
+ ; CHECK: $ymm16 = VPERMPDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm16 = VPERMPDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm16 = VPERMPDZ256ri $ymm16, 7
+ $ymm16 = VPERMPDZ256ri $ymm16, 7
+ ; CHECK: $ymm16 = VPERMPSZ256rm $ymm16, $rdi, 1, $noreg, 0, $noreg
+ $ymm16 = VPERMPSZ256rm $ymm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VPERMPSZ256rr $ymm1, $ymm16
+ $ymm16 = VPERMPSZ256rr $ymm1, $ymm16
+ ; CHECK: $ymm16 = VPERMQZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm16 = VPERMQZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm16 = VPERMQZ256ri $ymm16, 7
+ $ymm16 = VPERMQZ256ri $ymm16, 7
+ ; CHECK: $ymm16 = VPSLLDQZ256rr $ymm16, 14
+ $ymm16 = VPSLLDQZ256rr $ymm16, 14
+ ; CHECK: $ymm16 = VPSLLDZ256ri $ymm16, 7
+ $ymm16 = VPSLLDZ256ri $ymm16, 7
+ ; CHECK: $ymm16 = VPSLLDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSLLDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSLLDZ256rr $ymm16, 14
+ $ymm16 = VPSLLDZ256rr $ymm16, 14
+ ; CHECK: $ymm16 = VPSLLQZ256ri $ymm16, 7
+ $ymm16 = VPSLLQZ256ri $ymm16, 7
+ ; CHECK: $ymm16 = VPSLLQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSLLQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSLLQZ256rr $ymm16, 14
+ $ymm16 = VPSLLQZ256rr $ymm16, 14
+ ; CHECK: $ymm16 = VPSLLVDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSLLVDZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSLLVDZ256rr $ymm16, 14
+ $ymm16 = VPSLLVDZ256rr $ymm16, 14
+ ; CHECK: $ymm16 = VPSLLVQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSLLVQZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSLLVQZ256rr $ymm16, 14
+ $ymm16 = VPSLLVQZ256rr $ymm16, 14
+ ; CHECK: $ymm16 = VPSLLWZ256ri $ymm16, 7
+ $ymm16 = VPSLLWZ256ri $ymm16, 7
+ ; CHECK: $ymm16 = VPSLLWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ $ymm16 = VPSLLWZ256rm $ymm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $ymm16 = VPSLLWZ256rr $ymm16, 14
+ $ymm16 = VPSLLWZ256rr $ymm16, 14
+ ; CHECK: $ymm16 = VCVTDQ2PDZ256rm $rdi, $ymm16, 1, $noreg, 0
+ $ymm16 = VCVTDQ2PDZ256rm $rdi, $ymm16, 1, $noreg, 0
+ ; CHECK: $ymm16 = VCVTDQ2PDZ256rr $xmm0
+ $ymm16 = VCVTDQ2PDZ256rr $xmm0
+ ; CHECK: $ymm16 = VCVTDQ2PSZ256rm $rdi, $ymm16, 1, $noreg, 0
+ $ymm16 = VCVTDQ2PSZ256rm $rdi, $ymm16, 1, $noreg, 0
+ ; CHECK: $ymm16 = VCVTDQ2PSZ256rr $ymm16
+ $ymm16 = VCVTDQ2PSZ256rr $ymm16
+ ; CHECK: $xmm0 = VCVTPD2DQZ256rm $rdi, $ymm16, 1, $noreg, 0
+ $xmm0 = VCVTPD2DQZ256rm $rdi, $ymm16, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTPD2DQZ256rr $ymm16
+ $xmm0 = VCVTPD2DQZ256rr $ymm16
+ ; CHECK: $xmm0 = VCVTPD2PSZ256rm $rdi, $ymm16, 1, $noreg, 0
+ $xmm0 = VCVTPD2PSZ256rm $rdi, $ymm16, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTPD2PSZ256rr $ymm16
+ $xmm0 = VCVTPD2PSZ256rr $ymm16
+ ; CHECK: $ymm16 = VCVTPS2DQZ256rm $rdi, $ymm16, 1, $noreg, 0
+ $ymm16 = VCVTPS2DQZ256rm $rdi, $ymm16, 1, $noreg, 0
+ ; CHECK: $ymm16 = VCVTPS2DQZ256rr $ymm16
+ $ymm16 = VCVTPS2DQZ256rr $ymm16
+ ; CHECK: $ymm16 = VCVTPS2PDZ256rm $rdi, $ymm16, 1, $noreg, 0
+ $ymm16 = VCVTPS2PDZ256rm $rdi, $ymm16, 1, $noreg, 0
+ ; CHECK: $ymm16 = VCVTPS2PDZ256rr $xmm0
+ $ymm16 = VCVTPS2PDZ256rr $xmm0
+ ; CHECK: VCVTPS2PHZ256mr $rdi, $ymm16, 1, $noreg, 0, $noreg, $noreg
+ VCVTPS2PHZ256mr $rdi, $ymm16, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm0 = VCVTPS2PHZ256rr $ymm16, $noreg
+ $xmm0 = VCVTPS2PHZ256rr $ymm16, $noreg
+ ; CHECK: $ymm16 = VCVTPH2PSZ256rm $rdi, $ymm16, 1, $noreg, 0
+ $ymm16 = VCVTPH2PSZ256rm $rdi, $ymm16, 1, $noreg, 0
+ ; CHECK: $ymm16 = VCVTPH2PSZ256rr $xmm0
+ $ymm16 = VCVTPH2PSZ256rr $xmm0
+ ; CHECK: $xmm0 = VCVTTPD2DQZ256rm $rdi, $ymm16, 1, $noreg, 0
+ $xmm0 = VCVTTPD2DQZ256rm $rdi, $ymm16, 1, $noreg, 0
+ ; CHECK: $xmm0 = VCVTTPD2DQZ256rr $ymm16
+ $xmm0 = VCVTTPD2DQZ256rr $ymm16
+ ; CHECK: $ymm16 = VCVTTPS2DQZ256rm $rdi, $ymm16, 1, $noreg, 0
+ $ymm16 = VCVTTPS2DQZ256rm $rdi, $ymm16, 1, $noreg, 0
+ ; CHECK: $ymm16 = VCVTTPS2DQZ256rr $ymm16
+ $ymm16 = VCVTTPS2DQZ256rr $ymm16
+ ; CHECK: $ymm16 = VSQRTPDZ256m $rdi, $noreg, $noreg, $noreg, $noreg
+ $ymm16 = VSQRTPDZ256m $rdi, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm16 = VSQRTPDZ256r $ymm16
+ $ymm16 = VSQRTPDZ256r $ymm16
+ ; CHECK: $ymm16 = VSQRTPSZ256m $rdi, $noreg, $noreg, $noreg, $noreg
+ $ymm16 = VSQRTPSZ256m $rdi, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm16 = VSQRTPSZ256r $ymm16
+ $ymm16 = VSQRTPSZ256r $ymm16
+ ; CHECK: $ymm16 = VPALIGNRZ256rmi $ymm16, $rdi, $noreg, $noreg, $noreg, $noreg, $noreg
+ $ymm16 = VPALIGNRZ256rmi $ymm16, $rdi, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm16 = VPALIGNRZ256rri $ymm16, $ymm1, $noreg
+ $ymm16 = VPALIGNRZ256rri $ymm16, $ymm1, $noreg
+ ; CHECK: $ymm16 = VMOVUPSZ256rm $rdi, 1, $noreg, 0, $noreg
+ $ymm16 = VMOVUPSZ256rm $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $ymm16 = VMOVUPSZ256rr $ymm16
+ $ymm16 = VMOVUPSZ256rr $ymm16
+ ; CHECK: $ymm16 = VMOVUPSZ256rr_REV $ymm16
+ $ymm16 = VMOVUPSZ256rr_REV $ymm16
+ ; CHECK: $ymm16 = VPSHUFBZ256rm $ymm16, $noreg, $noreg, $noreg, $noreg, $noreg
+ $ymm16 = VPSHUFBZ256rm $ymm16, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm16 = VPSHUFBZ256rr $ymm16, $ymm1
+ $ymm16 = VPSHUFBZ256rr $ymm16, $ymm1
+ ; CHECK: $ymm16 = VPSHUFDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm16 = VPSHUFDZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm16 = VPSHUFDZ256ri $ymm16, -24
+ $ymm16 = VPSHUFDZ256ri $ymm16, -24
+ ; CHECK: $ymm16 = VPSHUFHWZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm16 = VPSHUFHWZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm16 = VPSHUFHWZ256ri $ymm16, -24
+ $ymm16 = VPSHUFHWZ256ri $ymm16, -24
+ ; CHECK: $ymm16 = VPSHUFLWZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $ymm16 = VPSHUFLWZ256mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $ymm16 = VPSHUFLWZ256ri $ymm16, -24
+ $ymm16 = VPSHUFLWZ256ri $ymm16, -24
+ ; CHECK: $ymm16 = VSHUFPDZ256rmi $ymm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ $ymm16 = VSHUFPDZ256rmi $ymm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm16 = VSHUFPDZ256rri $ymm16, $noreg, $noreg
+ $ymm16 = VSHUFPDZ256rri $ymm16, $noreg, $noreg
+ ; CHECK: $ymm16 = VSHUFPSZ256rmi $ymm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ $ymm16 = VSHUFPSZ256rmi $ymm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $ymm16 = VSHUFPSZ256rri $ymm16, $noreg, $noreg
+ $ymm16 = VSHUFPSZ256rri $ymm16, $noreg, $noreg
- RET 0, %zmm0, %zmm1
+ RET 0, $zmm0, $zmm1
...
---
# CHECK-LABEL: name: evex_z128_to_evex_test
@@ -3208,876 +3208,876 @@
name: evex_z128_to_evex_test
body: |
bb.0:
- ; CHECK: VMOVAPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVAPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: %xmm16 = VMOVAPDZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMOVAPDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMOVAPDZ128rr %xmm16
- %xmm16 = VMOVAPDZ128rr %xmm16
- ; CHECK: VMOVAPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVAPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: %xmm16 = VMOVAPSZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMOVAPSZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMOVAPSZ128rr %xmm16
- %xmm16 = VMOVAPSZ128rr %xmm16
- ; CHECK: VMOVDQA32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVDQA32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: %xmm16 = VMOVDQA32Z128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMOVDQA32Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMOVDQA32Z128rr %xmm16
- %xmm16 = VMOVDQA32Z128rr %xmm16
- ; CHECK: VMOVDQA64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVDQA64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: %xmm16 = VMOVDQA64Z128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMOVDQA64Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMOVDQA64Z128rr %xmm16
- %xmm16 = VMOVDQA64Z128rr %xmm16
- ; CHECK: VMOVDQU16Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVDQU16Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: %xmm16 = VMOVDQU16Z128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMOVDQU16Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMOVDQU16Z128rr %xmm16
- %xmm16 = VMOVDQU16Z128rr %xmm16
- ; CHECK: VMOVDQU32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVDQU32Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: %xmm16 = VMOVDQU32Z128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMOVDQU32Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMOVDQU32Z128rr %xmm16
- %xmm16 = VMOVDQU32Z128rr %xmm16
- ; CHECK: VMOVDQU64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVDQU64Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: %xmm16 = VMOVDQU64Z128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMOVDQU64Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMOVDQU64Z128rr %xmm16
- %xmm16 = VMOVDQU64Z128rr %xmm16
- ; CHECK: VMOVDQU8Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVDQU8Z128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: %xmm16 = VMOVDQU8Z128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMOVDQU8Z128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMOVDQU8Z128rr %xmm16
- %xmm16 = VMOVDQU8Z128rr %xmm16
- ; CHECK: %xmm16 = VMOVDQU8Z128rr_REV %xmm16
- %xmm16 = VMOVDQU8Z128rr_REV %xmm16
- ; CHECK: %xmm16 = VMOVNTDQAZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMOVNTDQAZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: VMOVUPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVUPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: %xmm16 = VMOVUPDZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMOVUPDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMOVUPDZ128rr %xmm16
- %xmm16 = VMOVUPDZ128rr %xmm16
- ; CHECK: %xmm16 = VMOVUPDZ128rr_REV %xmm16
- %xmm16 = VMOVUPDZ128rr_REV %xmm16
- ; CHECK: VMOVUPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVUPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: %xmm16 = VMOVUPSZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMOVUPSZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMOVUPSZ128rr %xmm16
- %xmm16 = VMOVUPSZ128rr %xmm16
- ; CHECK: %xmm16 = VMOVUPSZ128rr_REV %xmm16
- %xmm16 = VMOVUPSZ128rr_REV %xmm16
- ; CHECK: VMOVNTDQZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVNTDQZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: VMOVNTPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVNTPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: VMOVNTPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVNTPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: %xmm16 = VMOVAPDZ128rr_REV %xmm16
- %xmm16 = VMOVAPDZ128rr_REV %xmm16
- ; CHECK: %xmm16 = VMOVAPSZ128rr_REV %xmm16
- %xmm16 = VMOVAPSZ128rr_REV %xmm16
- ; CHECK: %xmm16 = VMOVDQA32Z128rr_REV %xmm16
- %xmm16 = VMOVDQA32Z128rr_REV %xmm16
- ; CHECK: %xmm16 = VMOVDQA64Z128rr_REV %xmm16
- %xmm16 = VMOVDQA64Z128rr_REV %xmm16
- ; CHECK: %xmm16 = VMOVDQU16Z128rr_REV %xmm16
- %xmm16 = VMOVDQU16Z128rr_REV %xmm16
- ; CHECK: %xmm16 = VMOVDQU32Z128rr_REV %xmm16
- %xmm16 = VMOVDQU32Z128rr_REV %xmm16
- ; CHECK: %xmm16 = VMOVDQU64Z128rr_REV %xmm16
- %xmm16 = VMOVDQU64Z128rr_REV %xmm16
- ; CHECK: %xmm16 = VPMOVSXBDZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMOVSXBDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMOVSXBDZ128rr %xmm16
- %xmm16 = VPMOVSXBDZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVSXBQZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMOVSXBQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMOVSXBQZ128rr %xmm16
- %xmm16 = VPMOVSXBQZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVSXBWZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMOVSXBWZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMOVSXBWZ128rr %xmm16
- %xmm16 = VPMOVSXBWZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVSXDQZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMOVSXDQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMOVSXDQZ128rr %xmm16
- %xmm16 = VPMOVSXDQZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVSXWDZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMOVSXWDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMOVSXWDZ128rr %xmm16
- %xmm16 = VPMOVSXWDZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVSXWQZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMOVSXWQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMOVSXWQZ128rr %xmm16
- %xmm16 = VPMOVSXWQZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVZXBDZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMOVZXBDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMOVZXBDZ128rr %xmm16
- %xmm16 = VPMOVZXBDZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVZXBQZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMOVZXBQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMOVZXBQZ128rr %xmm16
- %xmm16 = VPMOVZXBQZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVZXBWZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMOVZXBWZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMOVZXBWZ128rr %xmm16
- %xmm16 = VPMOVZXBWZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVZXDQZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMOVZXDQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMOVZXDQZ128rr %xmm16
- %xmm16 = VPMOVZXDQZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVZXWDZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMOVZXWDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMOVZXWDZ128rr %xmm16
- %xmm16 = VPMOVZXWDZ128rr %xmm16
- ; CHECK: %xmm16 = VPMOVZXWQZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMOVZXWQZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMOVZXWQZ128rr %xmm16
- %xmm16 = VPMOVZXWQZ128rr %xmm16
- ; CHECK: VMOVHPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVHPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: %xmm16 = VMOVHPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VMOVHPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: VMOVHPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVHPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: %xmm16 = VMOVHPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VMOVHPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: VMOVLPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVLPDZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: %xmm16 = VMOVLPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VMOVLPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: VMOVLPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- VMOVLPSZ128mr %rdi, 1, %noreg, 0, %noreg, %xmm16
- ; CHECK: %xmm16 = VMOVLPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VMOVLPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VMAXCPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMAXCPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMAXCPDZ128rr %xmm16, %xmm1
- %xmm16 = VMAXCPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXCPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMAXCPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMAXCPSZ128rr %xmm16, %xmm1
- %xmm16 = VMAXCPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMAXPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMAXPDZ128rr %xmm16, %xmm1
- %xmm16 = VMAXPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMAXPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMAXPSZ128rr %xmm16, %xmm1
- %xmm16 = VMAXPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINCPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMINCPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMINCPDZ128rr %xmm16, %xmm1
- %xmm16 = VMINCPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINCPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMINCPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMINCPSZ128rr %xmm16, %xmm1
- %xmm16 = VMINCPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMINPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMINPDZ128rr %xmm16, %xmm1
- %xmm16 = VMINPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMINPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMINPSZ128rr %xmm16, %xmm1
- %xmm16 = VMINPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMULPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMULPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMULPDZ128rr %xmm16, %xmm1
- %xmm16 = VMULPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMULPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMULPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMULPSZ128rr %xmm16, %xmm1
- %xmm16 = VMULPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VORPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VORPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VORPDZ128rr %xmm16, %xmm1
- %xmm16 = VORPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VORPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VORPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VORPSZ128rr %xmm16, %xmm1
- %xmm16 = VORPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPADDBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPADDBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPADDBZ128rr %xmm16, %xmm1
- %xmm16 = VPADDBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPADDDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPADDDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPADDDZ128rr %xmm16, %xmm1
- %xmm16 = VPADDDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPADDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPADDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPADDQZ128rr %xmm16, %xmm1
- %xmm16 = VPADDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPADDSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPADDSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPADDSBZ128rr %xmm16, %xmm1
- %xmm16 = VPADDSBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPADDSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPADDSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPADDSWZ128rr %xmm16, %xmm1
- %xmm16 = VPADDSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPADDUSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPADDUSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPADDUSBZ128rr %xmm16, %xmm1
- %xmm16 = VPADDUSBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPADDUSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPADDUSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPADDUSWZ128rr %xmm16, %xmm1
- %xmm16 = VPADDUSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPADDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPADDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPADDWZ128rr %xmm16, %xmm1
- %xmm16 = VPADDWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPANDDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPANDDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPANDDZ128rr %xmm16, %xmm1
- %xmm16 = VPANDDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPANDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPANDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPANDQZ128rr %xmm16, %xmm1
- %xmm16 = VPANDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPANDNDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPANDNDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPANDNDZ128rr %xmm16, %xmm1
- %xmm16 = VPANDNDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPANDNQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPANDNQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPANDNQZ128rr %xmm16, %xmm1
- %xmm16 = VPANDNQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPAVGBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPAVGBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPAVGBZ128rr %xmm16, %xmm1
- %xmm16 = VPAVGBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPAVGWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPAVGWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPAVGWZ128rr %xmm16, %xmm1
- %xmm16 = VPAVGWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMAXSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMAXSBZ128rr %xmm16, %xmm1
- %xmm16 = VPMAXSBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXSDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMAXSDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMAXSDZ128rr %xmm16, %xmm1
- %xmm16 = VPMAXSDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMAXSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMAXSWZ128rr %xmm16, %xmm1
- %xmm16 = VPMAXSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXUBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMAXUBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMAXUBZ128rr %xmm16, %xmm1
- %xmm16 = VPMAXUBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXUDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMAXUDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMAXUDZ128rr %xmm16, %xmm1
- %xmm16 = VPMAXUDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMAXUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMAXUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMAXUWZ128rr %xmm16, %xmm1
- %xmm16 = VPMAXUWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMINSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMINSBZ128rr %xmm16, %xmm1
- %xmm16 = VPMINSBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINSDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMINSDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMINSDZ128rr %xmm16, %xmm1
- %xmm16 = VPMINSDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMINSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMINSWZ128rr %xmm16, %xmm1
- %xmm16 = VPMINSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINUBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMINUBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMINUBZ128rr %xmm16, %xmm1
- %xmm16 = VPMINUBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINUDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMINUDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMINUDZ128rr %xmm16, %xmm1
- %xmm16 = VPMINUDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMINUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMINUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMINUWZ128rr %xmm16, %xmm1
- %xmm16 = VPMINUWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMULDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMULDQZ128rr %xmm16, %xmm1
- %xmm16 = VPMULDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULHRSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMULHRSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMULHRSWZ128rr %xmm16, %xmm1
- %xmm16 = VPMULHRSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULHUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMULHUWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMULHUWZ128rr %xmm16, %xmm1
- %xmm16 = VPMULHUWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULHWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMULHWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMULHWZ128rr %xmm16, %xmm1
- %xmm16 = VPMULHWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMULLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMULLDZ128rr %xmm16, %xmm1
- %xmm16 = VPMULLDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMULLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMULLWZ128rr %xmm16, %xmm1
- %xmm16 = VPMULLWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMULUDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMULUDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMULUDQZ128rr %xmm16, %xmm1
- %xmm16 = VPMULUDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPORDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPORDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPORDZ128rr %xmm16, %xmm1
- %xmm16 = VPORDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPORQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPORQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPORQZ128rr %xmm16, %xmm1
- %xmm16 = VPORQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSUBBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSUBBZ128rr %xmm16, %xmm1
- %xmm16 = VPSUBBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSUBDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSUBDZ128rr %xmm16, %xmm1
- %xmm16 = VPSUBDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSUBQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSUBQZ128rr %xmm16, %xmm1
- %xmm16 = VPSUBQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSUBSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSUBSBZ128rr %xmm16, %xmm1
- %xmm16 = VPSUBSBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSUBSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSUBSWZ128rr %xmm16, %xmm1
- %xmm16 = VPSUBSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBUSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSUBUSBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSUBUSBZ128rr %xmm16, %xmm1
- %xmm16 = VPSUBUSBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBUSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSUBUSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSUBUSWZ128rr %xmm16, %xmm1
- %xmm16 = VPSUBUSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSUBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSUBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSUBWZ128rr %xmm16, %xmm1
- %xmm16 = VPSUBWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VADDPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VADDPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VADDPDZ128rr %xmm16, %xmm1
- %xmm16 = VADDPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VADDPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VADDPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VADDPSZ128rr %xmm16, %xmm1
- %xmm16 = VADDPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VANDNPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VANDNPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VANDNPDZ128rr %xmm16, %xmm1
- %xmm16 = VANDNPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VANDNPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VANDNPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VANDNPSZ128rr %xmm16, %xmm1
- %xmm16 = VANDNPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VANDPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VANDPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VANDPDZ128rr %xmm16, %xmm1
- %xmm16 = VANDPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VANDPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VANDPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VANDPSZ128rr %xmm16, %xmm1
- %xmm16 = VANDPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VDIVPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VDIVPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VDIVPDZ128rr %xmm16, %xmm1
- %xmm16 = VDIVPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VDIVPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VDIVPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VDIVPSZ128rr %xmm16, %xmm1
- %xmm16 = VDIVPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPXORDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPXORDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPXORDZ128rr %xmm16, %xmm1
- %xmm16 = VPXORDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPXORQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPXORQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPXORQZ128rr %xmm16, %xmm1
- %xmm16 = VPXORQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VSUBPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VSUBPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VSUBPDZ128rr %xmm16, %xmm1
- %xmm16 = VSUBPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VSUBPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VSUBPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VSUBPSZ128rr %xmm16, %xmm1
- %xmm16 = VSUBPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VXORPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VXORPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VXORPDZ128rr %xmm16, %xmm1
- %xmm16 = VXORPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VXORPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VXORPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VXORPSZ128rr %xmm16, %xmm1
- %xmm16 = VXORPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMADDUBSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMADDUBSWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMADDUBSWZ128rr %xmm16, %xmm1
- %xmm16 = VPMADDUBSWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPMADDWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPMADDWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPMADDWDZ128rr %xmm16, %xmm1
- %xmm16 = VPMADDWDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPACKSSDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPACKSSDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPACKSSDWZ128rr %xmm16, %xmm1
- %xmm16 = VPACKSSDWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPACKSSWBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPACKSSWBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPACKSSWBZ128rr %xmm16, %xmm1
- %xmm16 = VPACKSSWBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPACKUSDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPACKUSDWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPACKUSDWZ128rr %xmm16, %xmm1
- %xmm16 = VPACKUSDWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPACKUSWBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPACKUSWBZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPACKUSWBZ128rr %xmm16, %xmm1
- %xmm16 = VPACKUSWBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKHBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPUNPCKHBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPUNPCKHBWZ128rr %xmm16, %xmm1
- %xmm16 = VPUNPCKHBWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKHDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPUNPCKHDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPUNPCKHDQZ128rr %xmm16, %xmm1
- %xmm16 = VPUNPCKHDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKHQDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPUNPCKHQDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPUNPCKHQDQZ128rr %xmm16, %xmm1
- %xmm16 = VPUNPCKHQDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKHWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPUNPCKHWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPUNPCKHWDZ128rr %xmm16, %xmm1
- %xmm16 = VPUNPCKHWDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKLBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPUNPCKLBWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPUNPCKLBWZ128rr %xmm16, %xmm1
- %xmm16 = VPUNPCKLBWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKLDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPUNPCKLDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPUNPCKLDQZ128rr %xmm16, %xmm1
- %xmm16 = VPUNPCKLDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKLQDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPUNPCKLQDQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPUNPCKLQDQZ128rr %xmm16, %xmm1
- %xmm16 = VPUNPCKLQDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPUNPCKLWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPUNPCKLWDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPUNPCKLWDZ128rr %xmm16, %xmm1
- %xmm16 = VPUNPCKLWDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VUNPCKHPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VUNPCKHPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VUNPCKHPDZ128rr %xmm16, %xmm1
- %xmm16 = VUNPCKHPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VUNPCKHPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VUNPCKHPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VUNPCKHPSZ128rr %xmm16, %xmm1
- %xmm16 = VUNPCKHPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VUNPCKLPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VUNPCKLPDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VUNPCKLPDZ128rr %xmm16, %xmm1
- %xmm16 = VUNPCKLPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VUNPCKLPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VUNPCKLPSZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VUNPCKLPSZ128rr %xmm16, %xmm1
- %xmm16 = VUNPCKLPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VFMADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD132PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD132PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD213PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD213PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD231PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD231PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADDSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADDSUB132PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADDSUB132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADDSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADDSUB132PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADDSUB132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADDSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADDSUB213PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADDSUB213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADDSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADDSUB213PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADDSUB213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADDSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADDSUB231PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADDSUB231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADDSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADDSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADDSUB231PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADDSUB231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB132PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB132PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB213PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB213PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB231PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB231PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUBADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUBADD132PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUBADD132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUBADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUBADD132PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUBADD132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUBADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUBADD213PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUBADD213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUBADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUBADD213PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUBADD213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUBADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUBADD231PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUBADD231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUBADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUBADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUBADD231PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUBADD231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD132PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD132PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD213PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD213PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD231PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD231PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB132PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB132PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB132PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB132PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB132PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB132PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB213PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB213PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB213PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB213PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB213PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB213PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB231PDZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB231PDZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB231PDZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB231PSZ128m %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB231PSZ128r %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB231PSZ128r %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VPSLLDZ128ri %xmm16, 7
- %xmm16 = VPSLLDZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSLLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSLLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSLLDZ128rr %xmm16, 14
- %xmm16 = VPSLLDZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSLLQZ128ri %xmm16, 7
- %xmm16 = VPSLLQZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSLLQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSLLQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSLLQZ128rr %xmm16, 14
- %xmm16 = VPSLLQZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSLLVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSLLVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSLLVDZ128rr %xmm16, 14
- %xmm16 = VPSLLVDZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSLLVQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSLLVQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSLLVQZ128rr %xmm16, 14
- %xmm16 = VPSLLVQZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSLLWZ128ri %xmm16, 7
- %xmm16 = VPSLLWZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSLLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSLLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSLLWZ128rr %xmm16, 14
- %xmm16 = VPSLLWZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRADZ128ri %xmm16, 7
- %xmm16 = VPSRADZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSRADZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSRADZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSRADZ128rr %xmm16, 14
- %xmm16 = VPSRADZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRAVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSRAVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSRAVDZ128rr %xmm16, 14
- %xmm16 = VPSRAVDZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRAWZ128ri %xmm16, 7
- %xmm16 = VPSRAWZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSRAWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSRAWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSRAWZ128rr %xmm16, 14
- %xmm16 = VPSRAWZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRLDQZ128rr %xmm16, 14
- %xmm16 = VPSRLDQZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRLDZ128ri %xmm16, 7
- %xmm16 = VPSRLDZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSRLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSRLDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSRLDZ128rr %xmm16, 14
- %xmm16 = VPSRLDZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRLQZ128ri %xmm16, 7
- %xmm16 = VPSRLQZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSRLQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSRLQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSRLQZ128rr %xmm16, 14
- %xmm16 = VPSRLQZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRLVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSRLVDZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSRLVDZ128rr %xmm16, 14
- %xmm16 = VPSRLVDZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRLVQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSRLVQZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSRLVQZ128rr %xmm16, 14
- %xmm16 = VPSRLVQZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPSRLWZ128ri %xmm16, 7
- %xmm16 = VPSRLWZ128ri %xmm16, 7
- ; CHECK: %xmm16 = VPSRLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPSRLWZ128rm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPSRLWZ128rr %xmm16, 14
- %xmm16 = VPSRLWZ128rr %xmm16, 14
- ; CHECK: %xmm16 = VPERMILPDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %xmm16 = VPERMILPDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm16 = VPERMILPDZ128ri %xmm16, 9
- %xmm16 = VPERMILPDZ128ri %xmm16, 9
- ; CHECK: %xmm16 = VPERMILPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VPERMILPDZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VPERMILPDZ128rr %xmm16, %xmm1
- %xmm16 = VPERMILPDZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPERMILPSZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %xmm16 = VPERMILPSZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm16 = VPERMILPSZ128ri %xmm16, 9
- %xmm16 = VPERMILPSZ128ri %xmm16, 9
- ; CHECK: %xmm16 = VPERMILPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VPERMILPSZ128rm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VPERMILPSZ128rr %xmm16, %xmm1
- %xmm16 = VPERMILPSZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VCVTPH2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTPH2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTPH2PSZ128rr %xmm16
- %xmm16 = VCVTPH2PSZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTDQ2PDZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTDQ2PDZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTDQ2PDZ128rr %xmm16
- %xmm16 = VCVTDQ2PDZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTDQ2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTDQ2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTDQ2PSZ128rr %xmm16
- %xmm16 = VCVTDQ2PSZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTPD2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTPD2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTPD2DQZ128rr %xmm16
- %xmm16 = VCVTPD2DQZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTPD2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTPD2PSZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTPD2PSZ128rr %xmm16
- %xmm16 = VCVTPD2PSZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTPS2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTPS2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTPS2DQZ128rr %xmm16
- %xmm16 = VCVTPS2DQZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTPS2PDZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTPS2PDZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTPS2PDZ128rr %xmm16
- %xmm16 = VCVTPS2PDZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTTPD2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTTPD2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTTPD2DQZ128rr %xmm16
- %xmm16 = VCVTTPD2DQZ128rr %xmm16
- ; CHECK: %xmm16 = VCVTTPS2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
- %xmm16 = VCVTTPS2DQZ128rm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %xmm16 = VCVTTPS2DQZ128rr %xmm16
- %xmm16 = VCVTTPS2DQZ128rr %xmm16
- ; CHECK: %xmm16 = VSQRTPDZ128m %rdi, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VSQRTPDZ128m %rdi, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VSQRTPDZ128r %xmm16
- %xmm16 = VSQRTPDZ128r %xmm16
- ; CHECK: %xmm16 = VSQRTPSZ128m %rdi, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VSQRTPSZ128m %rdi, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VSQRTPSZ128r %xmm16
- %xmm16 = VSQRTPSZ128r %xmm16
- ; CHECK: %xmm16 = VMOVDDUPZ128rm %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VMOVDDUPZ128rm %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VMOVDDUPZ128rr %xmm16
- %xmm16 = VMOVDDUPZ128rr %xmm16
- ; CHECK: %xmm16 = VMOVSHDUPZ128rm %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VMOVSHDUPZ128rm %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VMOVSHDUPZ128rr %xmm16
- %xmm16 = VMOVSHDUPZ128rr %xmm16
- ; CHECK: %xmm16 = VMOVSLDUPZ128rm %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VMOVSLDUPZ128rm %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VMOVSLDUPZ128rr %xmm16
- %xmm16 = VMOVSLDUPZ128rr %xmm16
- ; CHECK: %xmm16 = VPSHUFBZ128rm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VPSHUFBZ128rm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VPSHUFBZ128rr %xmm16, %xmm1
- %xmm16 = VPSHUFBZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VPSHUFDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %xmm16 = VPSHUFDZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm16 = VPSHUFDZ128ri %xmm16, -24
- %xmm16 = VPSHUFDZ128ri %xmm16, -24
- ; CHECK: %xmm16 = VPSHUFHWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %xmm16 = VPSHUFHWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm16 = VPSHUFHWZ128ri %xmm16, -24
- %xmm16 = VPSHUFHWZ128ri %xmm16, -24
- ; CHECK: %xmm16 = VPSHUFLWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- %xmm16 = VPSHUFLWZ128mi %rdi, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm16 = VPSHUFLWZ128ri %xmm16, -24
- %xmm16 = VPSHUFLWZ128ri %xmm16, -24
- ; CHECK: %xmm16 = VPSLLDQZ128rr %xmm16, %xmm1
- %xmm16 = VPSLLDQZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VSHUFPDZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VSHUFPDZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VSHUFPDZ128rri %xmm16, %noreg, %noreg
- %xmm16 = VSHUFPDZ128rri %xmm16, %noreg, %noreg
- ; CHECK: %xmm16 = VSHUFPSZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VSHUFPSZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VSHUFPSZ128rri %xmm16, %noreg, %noreg
- %xmm16 = VSHUFPSZ128rri %xmm16, %noreg, %noreg
- ; CHECK: %xmm16 = VPSADBWZ128rm %xmm16, 1, %noreg, %rax, %noreg, %noreg
- %xmm16 = VPSADBWZ128rm %xmm16, 1, %noreg, %rax, %noreg, %noreg
- ; CHECK: %xmm16 = VPSADBWZ128rr %xmm16, %xmm1
- %xmm16 = VPSADBWZ128rr %xmm16, %xmm1
- ; CHECK: %xmm16 = VBROADCASTSSZ128m %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VBROADCASTSSZ128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VBROADCASTSSZ128r %xmm16
- %xmm16 = VBROADCASTSSZ128r %xmm16
- ; CHECK: %xmm16 = VPBROADCASTBZ128m %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VPBROADCASTBZ128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VPBROADCASTBZ128r %xmm16
- %xmm16 = VPBROADCASTBZ128r %xmm16
- ; CHECK: %xmm16 = VPBROADCASTDZ128m %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VPBROADCASTDZ128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VPBROADCASTDZ128r %xmm16
- %xmm16 = VPBROADCASTDZ128r %xmm16
- ; CHECK: %xmm16 = VPBROADCASTQZ128m %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VPBROADCASTQZ128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VPBROADCASTQZ128r %xmm16
- %xmm16 = VPBROADCASTQZ128r %xmm16
- ; CHECK: %xmm16 = VPBROADCASTWZ128m %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VPBROADCASTWZ128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VPBROADCASTWZ128r %xmm16
- %xmm16 = VPBROADCASTWZ128r %xmm16
- ; CHECK: %xmm16 = VBROADCASTI32X2Z128m %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VBROADCASTI32X2Z128m %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VBROADCASTI32X2Z128r %xmm0
- %xmm16 = VBROADCASTI32X2Z128r %xmm0
- ; CHECK: %xmm16 = VCVTPS2PHZ128rr %xmm16, 2
- %xmm16 = VCVTPS2PHZ128rr %xmm16, 2
- ; CHECK: VCVTPS2PHZ128mr %rdi, %xmm16, 1, %noreg, 0, %noreg, %noreg
- VCVTPS2PHZ128mr %rdi, %xmm16, 1, %noreg, 0, %noreg, %noreg
- ; CHECK: %xmm16 = VPABSBZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPABSBZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPABSBZ128rr %xmm16
- %xmm16 = VPABSBZ128rr %xmm16
- ; CHECK: %xmm16 = VPABSDZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPABSDZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPABSDZ128rr %xmm16
- %xmm16 = VPABSDZ128rr %xmm16
- ; CHECK: %xmm16 = VPABSWZ128rm %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VPABSWZ128rm %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VPABSWZ128rr %xmm16
- %xmm16 = VPABSWZ128rr %xmm16
- ; CHECK: %xmm16 = VPALIGNRZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VPALIGNRZ128rmi %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VPALIGNRZ128rri %xmm16, %xmm1, 15
- %xmm16 = VPALIGNRZ128rri %xmm16, %xmm1, 15
- ; CHECK: VEXTRACTPSZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, %noreg
- VEXTRACTPSZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, %noreg
- ; CHECK: %eax = VEXTRACTPSZrr %xmm16, %noreg
- %eax = VEXTRACTPSZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VINSERTPSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VINSERTPSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VINSERTPSZrr %xmm16, %xmm16, %noreg
- %xmm16 = VINSERTPSZrr %xmm16, %xmm16, %noreg
+ ; CHECK: VMOVAPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVAPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVAPDZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVAPDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVAPDZ128rr $xmm16
+ $xmm16 = VMOVAPDZ128rr $xmm16
+ ; CHECK: VMOVAPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVAPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVAPSZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVAPSZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVAPSZ128rr $xmm16
+ $xmm16 = VMOVAPSZ128rr $xmm16
+ ; CHECK: VMOVDQA32Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVDQA32Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVDQA32Z128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVDQA32Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVDQA32Z128rr $xmm16
+ $xmm16 = VMOVDQA32Z128rr $xmm16
+ ; CHECK: VMOVDQA64Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVDQA64Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVDQA64Z128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVDQA64Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVDQA64Z128rr $xmm16
+ $xmm16 = VMOVDQA64Z128rr $xmm16
+ ; CHECK: VMOVDQU16Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVDQU16Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVDQU16Z128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVDQU16Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVDQU16Z128rr $xmm16
+ $xmm16 = VMOVDQU16Z128rr $xmm16
+ ; CHECK: VMOVDQU32Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVDQU32Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVDQU32Z128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVDQU32Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVDQU32Z128rr $xmm16
+ $xmm16 = VMOVDQU32Z128rr $xmm16
+ ; CHECK: VMOVDQU64Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVDQU64Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVDQU64Z128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVDQU64Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVDQU64Z128rr $xmm16
+ $xmm16 = VMOVDQU64Z128rr $xmm16
+ ; CHECK: VMOVDQU8Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVDQU8Z128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVDQU8Z128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVDQU8Z128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVDQU8Z128rr $xmm16
+ $xmm16 = VMOVDQU8Z128rr $xmm16
+ ; CHECK: $xmm16 = VMOVDQU8Z128rr_REV $xmm16
+ $xmm16 = VMOVDQU8Z128rr_REV $xmm16
+ ; CHECK: $xmm16 = VMOVNTDQAZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVNTDQAZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: VMOVUPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVUPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVUPDZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVUPDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVUPDZ128rr $xmm16
+ $xmm16 = VMOVUPDZ128rr $xmm16
+ ; CHECK: $xmm16 = VMOVUPDZ128rr_REV $xmm16
+ $xmm16 = VMOVUPDZ128rr_REV $xmm16
+ ; CHECK: VMOVUPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVUPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVUPSZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMOVUPSZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMOVUPSZ128rr $xmm16
+ $xmm16 = VMOVUPSZ128rr $xmm16
+ ; CHECK: $xmm16 = VMOVUPSZ128rr_REV $xmm16
+ $xmm16 = VMOVUPSZ128rr_REV $xmm16
+ ; CHECK: VMOVNTDQZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVNTDQZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: VMOVNTPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVNTPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: VMOVNTPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVNTPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVAPDZ128rr_REV $xmm16
+ $xmm16 = VMOVAPDZ128rr_REV $xmm16
+ ; CHECK: $xmm16 = VMOVAPSZ128rr_REV $xmm16
+ $xmm16 = VMOVAPSZ128rr_REV $xmm16
+ ; CHECK: $xmm16 = VMOVDQA32Z128rr_REV $xmm16
+ $xmm16 = VMOVDQA32Z128rr_REV $xmm16
+ ; CHECK: $xmm16 = VMOVDQA64Z128rr_REV $xmm16
+ $xmm16 = VMOVDQA64Z128rr_REV $xmm16
+ ; CHECK: $xmm16 = VMOVDQU16Z128rr_REV $xmm16
+ $xmm16 = VMOVDQU16Z128rr_REV $xmm16
+ ; CHECK: $xmm16 = VMOVDQU32Z128rr_REV $xmm16
+ $xmm16 = VMOVDQU32Z128rr_REV $xmm16
+ ; CHECK: $xmm16 = VMOVDQU64Z128rr_REV $xmm16
+ $xmm16 = VMOVDQU64Z128rr_REV $xmm16
+ ; CHECK: $xmm16 = VPMOVSXBDZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVSXBDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVSXBDZ128rr $xmm16
+ $xmm16 = VPMOVSXBDZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVSXBQZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVSXBQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVSXBQZ128rr $xmm16
+ $xmm16 = VPMOVSXBQZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVSXBWZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVSXBWZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVSXBWZ128rr $xmm16
+ $xmm16 = VPMOVSXBWZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVSXDQZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVSXDQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVSXDQZ128rr $xmm16
+ $xmm16 = VPMOVSXDQZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVSXWDZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVSXWDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVSXWDZ128rr $xmm16
+ $xmm16 = VPMOVSXWDZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVSXWQZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVSXWQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVSXWQZ128rr $xmm16
+ $xmm16 = VPMOVSXWQZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVZXBDZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVZXBDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVZXBDZ128rr $xmm16
+ $xmm16 = VPMOVZXBDZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVZXBQZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVZXBQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVZXBQZ128rr $xmm16
+ $xmm16 = VPMOVZXBQZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVZXBWZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVZXBWZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVZXBWZ128rr $xmm16
+ $xmm16 = VPMOVZXBWZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVZXDQZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVZXDQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVZXDQZ128rr $xmm16
+ $xmm16 = VPMOVZXDQZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVZXWDZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVZXWDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVZXWDZ128rr $xmm16
+ $xmm16 = VPMOVZXWDZ128rr $xmm16
+ ; CHECK: $xmm16 = VPMOVZXWQZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMOVZXWQZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMOVZXWQZ128rr $xmm16
+ $xmm16 = VPMOVZXWQZ128rr $xmm16
+ ; CHECK: VMOVHPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVHPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVHPDZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VMOVHPDZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: VMOVHPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVHPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVHPSZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VMOVHPSZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: VMOVLPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVLPDZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVLPDZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VMOVLPDZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: VMOVLPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ VMOVLPSZ128mr $rdi, 1, $noreg, 0, $noreg, $xmm16
+ ; CHECK: $xmm16 = VMOVLPSZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VMOVLPSZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VMAXCPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXCPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXCPDZ128rr $xmm16, $xmm1
+ $xmm16 = VMAXCPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXCPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXCPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXCPSZ128rr $xmm16, $xmm1
+ $xmm16 = VMAXCPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXPDZ128rr $xmm16, $xmm1
+ $xmm16 = VMAXPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXPSZ128rr $xmm16, $xmm1
+ $xmm16 = VMAXPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINCPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINCPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINCPDZ128rr $xmm16, $xmm1
+ $xmm16 = VMINCPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINCPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINCPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINCPSZ128rr $xmm16, $xmm1
+ $xmm16 = VMINCPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINPDZ128rr $xmm16, $xmm1
+ $xmm16 = VMINPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINPSZ128rr $xmm16, $xmm1
+ $xmm16 = VMINPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMULPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMULPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMULPDZ128rr $xmm16, $xmm1
+ $xmm16 = VMULPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMULPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMULPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMULPSZ128rr $xmm16, $xmm1
+ $xmm16 = VMULPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VORPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VORPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VORPDZ128rr $xmm16, $xmm1
+ $xmm16 = VORPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VORPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VORPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VORPSZ128rr $xmm16, $xmm1
+ $xmm16 = VORPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPADDBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPADDBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPADDBZ128rr $xmm16, $xmm1
+ $xmm16 = VPADDBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPADDDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPADDDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPADDDZ128rr $xmm16, $xmm1
+ $xmm16 = VPADDDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPADDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPADDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPADDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPADDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPADDSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPADDSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPADDSBZ128rr $xmm16, $xmm1
+ $xmm16 = VPADDSBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPADDSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPADDSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPADDSWZ128rr $xmm16, $xmm1
+ $xmm16 = VPADDSWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPADDUSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPADDUSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPADDUSBZ128rr $xmm16, $xmm1
+ $xmm16 = VPADDUSBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPADDUSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPADDUSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPADDUSWZ128rr $xmm16, $xmm1
+ $xmm16 = VPADDUSWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPADDWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPADDWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPADDWZ128rr $xmm16, $xmm1
+ $xmm16 = VPADDWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPANDDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPANDDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPANDDZ128rr $xmm16, $xmm1
+ $xmm16 = VPANDDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPANDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPANDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPANDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPANDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPANDNDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPANDNDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPANDNDZ128rr $xmm16, $xmm1
+ $xmm16 = VPANDNDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPANDNQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPANDNQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPANDNQZ128rr $xmm16, $xmm1
+ $xmm16 = VPANDNQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPAVGBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPAVGBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPAVGBZ128rr $xmm16, $xmm1
+ $xmm16 = VPAVGBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPAVGWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPAVGWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPAVGWZ128rr $xmm16, $xmm1
+ $xmm16 = VPAVGWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMAXSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMAXSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMAXSBZ128rr $xmm16, $xmm1
+ $xmm16 = VPMAXSBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMAXSDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMAXSDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMAXSDZ128rr $xmm16, $xmm1
+ $xmm16 = VPMAXSDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMAXSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMAXSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMAXSWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMAXSWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMAXUBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMAXUBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMAXUBZ128rr $xmm16, $xmm1
+ $xmm16 = VPMAXUBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMAXUDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMAXUDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMAXUDZ128rr $xmm16, $xmm1
+ $xmm16 = VPMAXUDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMAXUWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMAXUWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMAXUWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMAXUWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMINSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMINSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMINSBZ128rr $xmm16, $xmm1
+ $xmm16 = VPMINSBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMINSDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMINSDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMINSDZ128rr $xmm16, $xmm1
+ $xmm16 = VPMINSDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMINSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMINSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMINSWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMINSWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMINUBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMINUBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMINUBZ128rr $xmm16, $xmm1
+ $xmm16 = VPMINUBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMINUDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMINUDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMINUDZ128rr $xmm16, $xmm1
+ $xmm16 = VPMINUDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMINUWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMINUWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMINUWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMINUWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMULDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMULDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMULDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPMULDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMULHRSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMULHRSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMULHRSWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMULHRSWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMULHUWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMULHUWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMULHUWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMULHUWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMULHWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMULHWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMULHWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMULHWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMULLDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMULLDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMULLDZ128rr $xmm16, $xmm1
+ $xmm16 = VPMULLDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMULLWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMULLWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMULLWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMULLWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMULUDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMULUDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMULUDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPMULUDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPORDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPORDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPORDZ128rr $xmm16, $xmm1
+ $xmm16 = VPORDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPORQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPORQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPORQZ128rr $xmm16, $xmm1
+ $xmm16 = VPORQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSUBBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSUBBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSUBBZ128rr $xmm16, $xmm1
+ $xmm16 = VPSUBBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSUBDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSUBDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSUBDZ128rr $xmm16, $xmm1
+ $xmm16 = VPSUBDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSUBQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSUBQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSUBQZ128rr $xmm16, $xmm1
+ $xmm16 = VPSUBQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSUBSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSUBSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSUBSBZ128rr $xmm16, $xmm1
+ $xmm16 = VPSUBSBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSUBSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSUBSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSUBSWZ128rr $xmm16, $xmm1
+ $xmm16 = VPSUBSWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSUBUSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSUBUSBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSUBUSBZ128rr $xmm16, $xmm1
+ $xmm16 = VPSUBUSBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSUBUSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSUBUSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSUBUSWZ128rr $xmm16, $xmm1
+ $xmm16 = VPSUBUSWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSUBWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSUBWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSUBWZ128rr $xmm16, $xmm1
+ $xmm16 = VPSUBWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VADDPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VADDPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VADDPDZ128rr $xmm16, $xmm1
+ $xmm16 = VADDPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VADDPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VADDPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VADDPSZ128rr $xmm16, $xmm1
+ $xmm16 = VADDPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VANDNPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VANDNPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VANDNPDZ128rr $xmm16, $xmm1
+ $xmm16 = VANDNPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VANDNPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VANDNPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VANDNPSZ128rr $xmm16, $xmm1
+ $xmm16 = VANDNPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VANDPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VANDPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VANDPDZ128rr $xmm16, $xmm1
+ $xmm16 = VANDPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VANDPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VANDPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VANDPSZ128rr $xmm16, $xmm1
+ $xmm16 = VANDPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VDIVPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VDIVPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VDIVPDZ128rr $xmm16, $xmm1
+ $xmm16 = VDIVPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VDIVPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VDIVPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VDIVPSZ128rr $xmm16, $xmm1
+ $xmm16 = VDIVPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPXORDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPXORDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPXORDZ128rr $xmm16, $xmm1
+ $xmm16 = VPXORDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPXORQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPXORQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPXORQZ128rr $xmm16, $xmm1
+ $xmm16 = VPXORQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VSUBPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VSUBPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VSUBPDZ128rr $xmm16, $xmm1
+ $xmm16 = VSUBPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VSUBPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VSUBPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VSUBPSZ128rr $xmm16, $xmm1
+ $xmm16 = VSUBPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VXORPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VXORPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VXORPDZ128rr $xmm16, $xmm1
+ $xmm16 = VXORPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VXORPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VXORPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VXORPSZ128rr $xmm16, $xmm1
+ $xmm16 = VXORPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMADDUBSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMADDUBSWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMADDUBSWZ128rr $xmm16, $xmm1
+ $xmm16 = VPMADDUBSWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPMADDWDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPMADDWDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPMADDWDZ128rr $xmm16, $xmm1
+ $xmm16 = VPMADDWDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPACKSSDWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPACKSSDWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPACKSSDWZ128rr $xmm16, $xmm1
+ $xmm16 = VPACKSSDWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPACKSSWBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPACKSSWBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPACKSSWBZ128rr $xmm16, $xmm1
+ $xmm16 = VPACKSSWBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPACKUSDWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPACKUSDWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPACKUSDWZ128rr $xmm16, $xmm1
+ $xmm16 = VPACKUSDWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPACKUSWBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPACKUSWBZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPACKUSWBZ128rr $xmm16, $xmm1
+ $xmm16 = VPACKUSWBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPUNPCKHBWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPUNPCKHBWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPUNPCKHBWZ128rr $xmm16, $xmm1
+ $xmm16 = VPUNPCKHBWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPUNPCKHDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPUNPCKHDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPUNPCKHDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPUNPCKHDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPUNPCKHQDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPUNPCKHQDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPUNPCKHQDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPUNPCKHQDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPUNPCKHWDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPUNPCKHWDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPUNPCKHWDZ128rr $xmm16, $xmm1
+ $xmm16 = VPUNPCKHWDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPUNPCKLBWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPUNPCKLBWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPUNPCKLBWZ128rr $xmm16, $xmm1
+ $xmm16 = VPUNPCKLBWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPUNPCKLDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPUNPCKLDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPUNPCKLDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPUNPCKLDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPUNPCKLQDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPUNPCKLQDQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPUNPCKLQDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPUNPCKLQDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPUNPCKLWDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPUNPCKLWDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPUNPCKLWDZ128rr $xmm16, $xmm1
+ $xmm16 = VPUNPCKLWDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VUNPCKHPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VUNPCKHPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VUNPCKHPDZ128rr $xmm16, $xmm1
+ $xmm16 = VUNPCKHPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VUNPCKHPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VUNPCKHPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VUNPCKHPSZ128rr $xmm16, $xmm1
+ $xmm16 = VUNPCKHPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VUNPCKLPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VUNPCKLPDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VUNPCKLPDZ128rr $xmm16, $xmm1
+ $xmm16 = VUNPCKLPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VUNPCKLPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VUNPCKLPSZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VUNPCKLPSZ128rr $xmm16, $xmm1
+ $xmm16 = VUNPCKLPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VFMADD132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD132PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD132PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD132PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD132PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD213PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD213PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD213PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD213PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD231PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD231PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD231PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD231PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADDSUB132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADDSUB132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADDSUB132PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADDSUB132PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADDSUB132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADDSUB132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADDSUB132PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADDSUB132PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADDSUB213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADDSUB213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADDSUB213PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADDSUB213PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADDSUB213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADDSUB213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADDSUB213PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADDSUB213PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADDSUB231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADDSUB231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADDSUB231PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADDSUB231PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADDSUB231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADDSUB231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADDSUB231PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADDSUB231PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB132PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB132PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB132PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB132PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB213PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB213PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB213PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB213PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB231PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB231PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB231PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB231PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUBADD132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUBADD132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUBADD132PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUBADD132PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUBADD132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUBADD132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUBADD132PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUBADD132PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUBADD213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUBADD213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUBADD213PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUBADD213PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUBADD213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUBADD213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUBADD213PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUBADD213PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUBADD231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUBADD231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUBADD231PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUBADD231PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUBADD231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUBADD231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUBADD231PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUBADD231PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD132PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD132PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD132PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD132PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD213PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD213PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD213PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD213PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD231PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD231PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD231PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD231PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB132PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB132PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB132PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB132PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB132PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB132PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB213PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB213PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB213PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB213PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB213PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB213PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB231PDZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB231PDZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB231PDZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB231PSZ128m $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB231PSZ128r $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB231PSZ128r $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VPSLLDZ128ri $xmm16, 7
+ $xmm16 = VPSLLDZ128ri $xmm16, 7
+ ; CHECK: $xmm16 = VPSLLDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSLLDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSLLDZ128rr $xmm16, 14
+ $xmm16 = VPSLLDZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSLLQZ128ri $xmm16, 7
+ $xmm16 = VPSLLQZ128ri $xmm16, 7
+ ; CHECK: $xmm16 = VPSLLQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSLLQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSLLQZ128rr $xmm16, 14
+ $xmm16 = VPSLLQZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSLLVDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSLLVDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSLLVDZ128rr $xmm16, 14
+ $xmm16 = VPSLLVDZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSLLVQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSLLVQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSLLVQZ128rr $xmm16, 14
+ $xmm16 = VPSLLVQZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSLLWZ128ri $xmm16, 7
+ $xmm16 = VPSLLWZ128ri $xmm16, 7
+ ; CHECK: $xmm16 = VPSLLWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSLLWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSLLWZ128rr $xmm16, 14
+ $xmm16 = VPSLLWZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSRADZ128ri $xmm16, 7
+ $xmm16 = VPSRADZ128ri $xmm16, 7
+ ; CHECK: $xmm16 = VPSRADZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSRADZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSRADZ128rr $xmm16, 14
+ $xmm16 = VPSRADZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSRAVDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSRAVDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSRAVDZ128rr $xmm16, 14
+ $xmm16 = VPSRAVDZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSRAWZ128ri $xmm16, 7
+ $xmm16 = VPSRAWZ128ri $xmm16, 7
+ ; CHECK: $xmm16 = VPSRAWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSRAWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSRAWZ128rr $xmm16, 14
+ $xmm16 = VPSRAWZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSRLDQZ128rr $xmm16, 14
+ $xmm16 = VPSRLDQZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSRLDZ128ri $xmm16, 7
+ $xmm16 = VPSRLDZ128ri $xmm16, 7
+ ; CHECK: $xmm16 = VPSRLDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSRLDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSRLDZ128rr $xmm16, 14
+ $xmm16 = VPSRLDZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSRLQZ128ri $xmm16, 7
+ $xmm16 = VPSRLQZ128ri $xmm16, 7
+ ; CHECK: $xmm16 = VPSRLQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSRLQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSRLQZ128rr $xmm16, 14
+ $xmm16 = VPSRLQZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSRLVDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSRLVDZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSRLVDZ128rr $xmm16, 14
+ $xmm16 = VPSRLVDZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSRLVQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSRLVQZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSRLVQZ128rr $xmm16, 14
+ $xmm16 = VPSRLVQZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPSRLWZ128ri $xmm16, 7
+ $xmm16 = VPSRLWZ128ri $xmm16, 7
+ ; CHECK: $xmm16 = VPSRLWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPSRLWZ128rm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPSRLWZ128rr $xmm16, 14
+ $xmm16 = VPSRLWZ128rr $xmm16, 14
+ ; CHECK: $xmm16 = VPERMILPDZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $xmm16 = VPERMILPDZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm16 = VPERMILPDZ128ri $xmm16, 9
+ $xmm16 = VPERMILPDZ128ri $xmm16, 9
+ ; CHECK: $xmm16 = VPERMILPDZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VPERMILPDZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VPERMILPDZ128rr $xmm16, $xmm1
+ $xmm16 = VPERMILPDZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPERMILPSZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $xmm16 = VPERMILPSZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm16 = VPERMILPSZ128ri $xmm16, 9
+ $xmm16 = VPERMILPSZ128ri $xmm16, 9
+ ; CHECK: $xmm16 = VPERMILPSZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VPERMILPSZ128rm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VPERMILPSZ128rr $xmm16, $xmm1
+ $xmm16 = VPERMILPSZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VCVTPH2PSZ128rm $rdi, $xmm16, 1, $noreg, 0
+ $xmm16 = VCVTPH2PSZ128rm $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $xmm16 = VCVTPH2PSZ128rr $xmm16
+ $xmm16 = VCVTPH2PSZ128rr $xmm16
+ ; CHECK: $xmm16 = VCVTDQ2PDZ128rm $rdi, $xmm16, 1, $noreg, 0
+ $xmm16 = VCVTDQ2PDZ128rm $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $xmm16 = VCVTDQ2PDZ128rr $xmm16
+ $xmm16 = VCVTDQ2PDZ128rr $xmm16
+ ; CHECK: $xmm16 = VCVTDQ2PSZ128rm $rdi, $xmm16, 1, $noreg, 0
+ $xmm16 = VCVTDQ2PSZ128rm $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $xmm16 = VCVTDQ2PSZ128rr $xmm16
+ $xmm16 = VCVTDQ2PSZ128rr $xmm16
+ ; CHECK: $xmm16 = VCVTPD2DQZ128rm $rdi, $xmm16, 1, $noreg, 0
+ $xmm16 = VCVTPD2DQZ128rm $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $xmm16 = VCVTPD2DQZ128rr $xmm16
+ $xmm16 = VCVTPD2DQZ128rr $xmm16
+ ; CHECK: $xmm16 = VCVTPD2PSZ128rm $rdi, $xmm16, 1, $noreg, 0
+ $xmm16 = VCVTPD2PSZ128rm $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $xmm16 = VCVTPD2PSZ128rr $xmm16
+ $xmm16 = VCVTPD2PSZ128rr $xmm16
+ ; CHECK: $xmm16 = VCVTPS2DQZ128rm $rdi, $xmm16, 1, $noreg, 0
+ $xmm16 = VCVTPS2DQZ128rm $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $xmm16 = VCVTPS2DQZ128rr $xmm16
+ $xmm16 = VCVTPS2DQZ128rr $xmm16
+ ; CHECK: $xmm16 = VCVTPS2PDZ128rm $rdi, $xmm16, 1, $noreg, 0
+ $xmm16 = VCVTPS2PDZ128rm $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $xmm16 = VCVTPS2PDZ128rr $xmm16
+ $xmm16 = VCVTPS2PDZ128rr $xmm16
+ ; CHECK: $xmm16 = VCVTTPD2DQZ128rm $rdi, $xmm16, 1, $noreg, 0
+ $xmm16 = VCVTTPD2DQZ128rm $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $xmm16 = VCVTTPD2DQZ128rr $xmm16
+ $xmm16 = VCVTTPD2DQZ128rr $xmm16
+ ; CHECK: $xmm16 = VCVTTPS2DQZ128rm $rdi, $xmm16, 1, $noreg, 0
+ $xmm16 = VCVTTPS2DQZ128rm $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $xmm16 = VCVTTPS2DQZ128rr $xmm16
+ $xmm16 = VCVTTPS2DQZ128rr $xmm16
+ ; CHECK: $xmm16 = VSQRTPDZ128m $rdi, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VSQRTPDZ128m $rdi, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VSQRTPDZ128r $xmm16
+ $xmm16 = VSQRTPDZ128r $xmm16
+ ; CHECK: $xmm16 = VSQRTPSZ128m $rdi, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VSQRTPSZ128m $rdi, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VSQRTPSZ128r $xmm16
+ $xmm16 = VSQRTPSZ128r $xmm16
+ ; CHECK: $xmm16 = VMOVDDUPZ128rm $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VMOVDDUPZ128rm $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VMOVDDUPZ128rr $xmm16
+ $xmm16 = VMOVDDUPZ128rr $xmm16
+ ; CHECK: $xmm16 = VMOVSHDUPZ128rm $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VMOVSHDUPZ128rm $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VMOVSHDUPZ128rr $xmm16
+ $xmm16 = VMOVSHDUPZ128rr $xmm16
+ ; CHECK: $xmm16 = VMOVSLDUPZ128rm $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VMOVSLDUPZ128rm $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VMOVSLDUPZ128rr $xmm16
+ $xmm16 = VMOVSLDUPZ128rr $xmm16
+ ; CHECK: $xmm16 = VPSHUFBZ128rm $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VPSHUFBZ128rm $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VPSHUFBZ128rr $xmm16, $xmm1
+ $xmm16 = VPSHUFBZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VPSHUFDZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $xmm16 = VPSHUFDZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm16 = VPSHUFDZ128ri $xmm16, -24
+ $xmm16 = VPSHUFDZ128ri $xmm16, -24
+ ; CHECK: $xmm16 = VPSHUFHWZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $xmm16 = VPSHUFHWZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm16 = VPSHUFHWZ128ri $xmm16, -24
+ $xmm16 = VPSHUFHWZ128ri $xmm16, -24
+ ; CHECK: $xmm16 = VPSHUFLWZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ $xmm16 = VPSHUFLWZ128mi $rdi, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm16 = VPSHUFLWZ128ri $xmm16, -24
+ $xmm16 = VPSHUFLWZ128ri $xmm16, -24
+ ; CHECK: $xmm16 = VPSLLDQZ128rr $xmm16, $xmm1
+ $xmm16 = VPSLLDQZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VSHUFPDZ128rmi $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VSHUFPDZ128rmi $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VSHUFPDZ128rri $xmm16, $noreg, $noreg
+ $xmm16 = VSHUFPDZ128rri $xmm16, $noreg, $noreg
+ ; CHECK: $xmm16 = VSHUFPSZ128rmi $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VSHUFPSZ128rmi $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VSHUFPSZ128rri $xmm16, $noreg, $noreg
+ $xmm16 = VSHUFPSZ128rri $xmm16, $noreg, $noreg
+ ; CHECK: $xmm16 = VPSADBWZ128rm $xmm16, 1, $noreg, $rax, $noreg, $noreg
+ $xmm16 = VPSADBWZ128rm $xmm16, 1, $noreg, $rax, $noreg, $noreg
+ ; CHECK: $xmm16 = VPSADBWZ128rr $xmm16, $xmm1
+ $xmm16 = VPSADBWZ128rr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VBROADCASTSSZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VBROADCASTSSZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VBROADCASTSSZ128r $xmm16
+ $xmm16 = VBROADCASTSSZ128r $xmm16
+ ; CHECK: $xmm16 = VPBROADCASTBZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VPBROADCASTBZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VPBROADCASTBZ128r $xmm16
+ $xmm16 = VPBROADCASTBZ128r $xmm16
+ ; CHECK: $xmm16 = VPBROADCASTDZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VPBROADCASTDZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VPBROADCASTDZ128r $xmm16
+ $xmm16 = VPBROADCASTDZ128r $xmm16
+ ; CHECK: $xmm16 = VPBROADCASTQZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VPBROADCASTQZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VPBROADCASTQZ128r $xmm16
+ $xmm16 = VPBROADCASTQZ128r $xmm16
+ ; CHECK: $xmm16 = VPBROADCASTWZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VPBROADCASTWZ128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VPBROADCASTWZ128r $xmm16
+ $xmm16 = VPBROADCASTWZ128r $xmm16
+ ; CHECK: $xmm16 = VBROADCASTI32X2Z128m $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VBROADCASTI32X2Z128m $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VBROADCASTI32X2Z128r $xmm0
+ $xmm16 = VBROADCASTI32X2Z128r $xmm0
+ ; CHECK: $xmm16 = VCVTPS2PHZ128rr $xmm16, 2
+ $xmm16 = VCVTPS2PHZ128rr $xmm16, 2
+ ; CHECK: VCVTPS2PHZ128mr $rdi, $xmm16, 1, $noreg, 0, $noreg, $noreg
+ VCVTPS2PHZ128mr $rdi, $xmm16, 1, $noreg, 0, $noreg, $noreg
+ ; CHECK: $xmm16 = VPABSBZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPABSBZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPABSBZ128rr $xmm16
+ $xmm16 = VPABSBZ128rr $xmm16
+ ; CHECK: $xmm16 = VPABSDZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPABSDZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPABSDZ128rr $xmm16
+ $xmm16 = VPABSDZ128rr $xmm16
+ ; CHECK: $xmm16 = VPABSWZ128rm $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VPABSWZ128rm $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VPABSWZ128rr $xmm16
+ $xmm16 = VPABSWZ128rr $xmm16
+ ; CHECK: $xmm16 = VPALIGNRZ128rmi $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VPALIGNRZ128rmi $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VPALIGNRZ128rri $xmm16, $xmm1, 15
+ $xmm16 = VPALIGNRZ128rri $xmm16, $xmm1, 15
+ ; CHECK: VEXTRACTPSZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, $noreg
+ VEXTRACTPSZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, $noreg
+ ; CHECK: $eax = VEXTRACTPSZrr $xmm16, $noreg
+ $eax = VEXTRACTPSZrr $xmm16, $noreg
+ ; CHECK: $xmm16 = VINSERTPSZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VINSERTPSZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VINSERTPSZrr $xmm16, $xmm16, $noreg
+ $xmm16 = VINSERTPSZrr $xmm16, $xmm16, $noreg
- RET 0, %zmm0, %zmm1
+ RET 0, $zmm0, $zmm1
...
---
# CHECK-LABEL: name: evex_scalar_to_evex_test
@@ -4086,548 +4086,548 @@
name: evex_scalar_to_evex_test
body: |
bb.0:
- ; CHECK: %xmm16 = VADDSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VADDSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VADDSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VADDSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VADDSDZrr %xmm16, %xmm1
- %xmm16 = VADDSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VADDSDZrr_Int %xmm16, %xmm1
- %xmm16 = VADDSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VADDSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VADDSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VADDSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VADDSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VADDSSZrr %xmm16, %xmm1
- %xmm16 = VADDSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VADDSSZrr_Int %xmm16, %xmm1
- %xmm16 = VADDSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VDIVSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VDIVSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VDIVSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VDIVSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VDIVSDZrr %xmm16, %xmm1
- %xmm16 = VDIVSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VDIVSDZrr_Int %xmm16, %xmm1
- %xmm16 = VDIVSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VDIVSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VDIVSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VDIVSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VDIVSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VDIVSSZrr %xmm16, %xmm1
- %xmm16 = VDIVSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VDIVSSZrr_Int %xmm16, %xmm1
- %xmm16 = VDIVSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXCSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMAXCSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMAXCSDZrr %xmm16, %xmm1
- %xmm16 = VMAXCSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXCSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMAXCSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMAXCSSZrr %xmm16, %xmm1
- %xmm16 = VMAXCSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMAXSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMAXSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMAXSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMAXSDZrr %xmm16, %xmm1
- %xmm16 = VMAXSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXSDZrr_Int %xmm16, %xmm1
- %xmm16 = VMAXSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMAXSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMAXSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMAXSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMAXSSZrr %xmm16, %xmm1
- %xmm16 = VMAXSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMAXSSZrr_Int %xmm16, %xmm1
- %xmm16 = VMAXSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINCSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMINCSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMINCSDZrr %xmm16, %xmm1
- %xmm16 = VMINCSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINCSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMINCSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMINCSSZrr %xmm16, %xmm1
- %xmm16 = VMINCSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMINSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMINSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMINSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMINSDZrr %xmm16, %xmm1
- %xmm16 = VMINSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINSDZrr_Int %xmm16, %xmm1
- %xmm16 = VMINSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMINSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMINSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMINSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMINSSZrr %xmm16, %xmm1
- %xmm16 = VMINSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMINSSZrr_Int %xmm16, %xmm1
- %xmm16 = VMINSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMULSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMULSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMULSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMULSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMULSDZrr %xmm16, %xmm1
- %xmm16 = VMULSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMULSDZrr_Int %xmm16, %xmm1
- %xmm16 = VMULSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VMULSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMULSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMULSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VMULSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VMULSSZrr %xmm16, %xmm1
- %xmm16 = VMULSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VMULSSZrr_Int %xmm16, %xmm1
- %xmm16 = VMULSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VSUBSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VSUBSDZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VSUBSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VSUBSDZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VSUBSDZrr %xmm16, %xmm1
- %xmm16 = VSUBSDZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VSUBSDZrr_Int %xmm16, %xmm1
- %xmm16 = VSUBSDZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VSUBSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VSUBSSZrm %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VSUBSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- %xmm16 = VSUBSSZrm_Int %xmm16, %rip, 1, %noreg, %rax, %noreg
- ; CHECK: %xmm16 = VSUBSSZrr %xmm16, %xmm1
- %xmm16 = VSUBSSZrr %xmm16, %xmm1
- ; CHECK: %xmm16 = VSUBSSZrr_Int %xmm16, %xmm1
- %xmm16 = VSUBSSZrr_Int %xmm16, %xmm1
- ; CHECK: %xmm16 = VFMADD132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD132SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD132SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD132SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD132SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD132SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD132SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD132SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD132SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD213SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD213SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD213SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD213SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD213SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD213SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD213SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD213SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD231SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD231SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD231SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD231SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMADD231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMADD231SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD231SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMADD231SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMADD231SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB132SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB132SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB132SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB132SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB132SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB132SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB132SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB132SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB213SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB213SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB213SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB213SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB213SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB213SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB213SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB213SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB231SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB231SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB231SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB231SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFMSUB231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFMSUB231SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB231SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFMSUB231SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFMSUB231SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD132SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD132SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD132SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD132SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD132SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD132SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD132SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD132SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD213SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD213SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD213SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD213SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD213SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD213SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD213SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD213SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD231SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD231SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD231SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD231SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMADD231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMADD231SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD231SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMADD231SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMADD231SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB132SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB132SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB132SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB132SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB132SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB132SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB132SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB132SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB132SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB132SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB132SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB132SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB213SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB213SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB213SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB213SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB213SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB213SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB213SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB213SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB213SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB213SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB213SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB213SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB231SDZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB231SDZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB231SDZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB231SDZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB231SDZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB231SDZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB231SSZm %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- %xmm16 = VFNMSUB231SSZm_Int %xmm16, %xmm16, %rsi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VFNMSUB231SSZr %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB231SSZr %xmm16, %xmm1, %xmm2
- ; CHECK: %xmm16 = VFNMSUB231SSZr_Int %xmm16, %xmm1, %xmm2
- %xmm16 = VFNMSUB231SSZr_Int %xmm16, %xmm1, %xmm2
- ; CHECK: VPEXTRBZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
- VPEXTRBZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
- ; CHECK: %eax = VPEXTRBZrr %xmm16, 1
- %eax = VPEXTRBZrr %xmm16, 1
- ; CHECK: VPEXTRDZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
- VPEXTRDZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
- ; CHECK: %eax = VPEXTRDZrr %xmm16, 1
- %eax = VPEXTRDZrr %xmm16, 1
- ; CHECK: VPEXTRQZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
- VPEXTRQZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
- ; CHECK: %rax = VPEXTRQZrr %xmm16, 1
- %rax = VPEXTRQZrr %xmm16, 1
- ; CHECK: VPEXTRWZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
- VPEXTRWZmr %rdi, 1, %noreg, 0, %noreg, %xmm16, 3
- ; CHECK: %eax = VPEXTRWZrr %xmm16, 1
- %eax = VPEXTRWZrr %xmm16, 1
- ; CHECK: %eax = VPEXTRWZrr_REV %xmm16, 1
- %eax = VPEXTRWZrr_REV %xmm16, 1
- ; CHECK: %xmm16 = VPINSRBZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
- %xmm16 = VPINSRBZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
- ; CHECK: %xmm16 = VPINSRBZrr %xmm16, %edi, 5
- %xmm16 = VPINSRBZrr %xmm16, %edi, 5
- ; CHECK: %xmm16 = VPINSRDZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
- %xmm16 = VPINSRDZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
- ; CHECK: %xmm16 = VPINSRDZrr %xmm16, %edi, 5
- %xmm16 = VPINSRDZrr %xmm16, %edi, 5
- ; CHECK: %xmm16 = VPINSRQZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
- %xmm16 = VPINSRQZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
- ; CHECK: %xmm16 = VPINSRQZrr %xmm16, %rdi, 5
- %xmm16 = VPINSRQZrr %xmm16, %rdi, 5
- ; CHECK: %xmm16 = VPINSRWZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
- %xmm16 = VPINSRWZrm %xmm16, %rsi, 1, %noreg, 0, %noreg, 3
- ; CHECK: %xmm16 = VPINSRWZrr %xmm16, %edi, 5
- %xmm16 = VPINSRWZrr %xmm16, %edi, 5
- ; CHECK: %xmm16 = VSQRTSDZm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VSQRTSDZm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VSQRTSDZm_Int %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VSQRTSDZm_Int %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VSQRTSDZr %xmm16, %noreg
- %xmm16 = VSQRTSDZr %xmm16, %noreg
- ; CHECK: %xmm16 = VSQRTSDZr_Int %xmm16, %noreg
- %xmm16 = VSQRTSDZr_Int %xmm16, %noreg
- ; CHECK: %xmm16 = VSQRTSSZm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VSQRTSSZm %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VSQRTSSZm_Int %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VSQRTSSZm_Int %xmm16, %noreg, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VSQRTSSZr %xmm16, %noreg
- %xmm16 = VSQRTSSZr %xmm16, %noreg
- ; CHECK: %xmm16 = VSQRTSSZr_Int %xmm16, %noreg
- %xmm16 = VSQRTSSZr_Int %xmm16, %noreg
- ; CHECK: %rdi = VCVTSD2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
- %rdi = VCVTSD2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %rdi = VCVTSD2SI64Zrr_Int %xmm16
- %rdi = VCVTSD2SI64Zrr_Int %xmm16
- ; CHECK: %edi = VCVTSD2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
- %edi = VCVTSD2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %edi = VCVTSD2SIZrr_Int %xmm16
- %edi = VCVTSD2SIZrr_Int %xmm16
- ; CHECK: %xmm16 = VCVTSD2SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSD2SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSD2SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSD2SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSD2SSZrr %xmm16, %noreg
- %xmm16 = VCVTSD2SSZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSD2SSZrr_Int %xmm16, %noreg
- %xmm16 = VCVTSD2SSZrr_Int %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSI2SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSI2SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSI2SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSI2SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSI2SDZrr %xmm16, %noreg
- %xmm16 = VCVTSI2SDZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSI2SDZrr_Int %xmm16, %noreg
- %xmm16 = VCVTSI2SDZrr_Int %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSI2SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSI2SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSI2SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSI2SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSI2SSZrr %xmm16, %noreg
- %xmm16 = VCVTSI2SSZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSI2SSZrr_Int %xmm16, %noreg
- %xmm16 = VCVTSI2SSZrr_Int %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSI642SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSI642SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSI642SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSI642SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSI642SDZrr %xmm16, %noreg
- %xmm16 = VCVTSI642SDZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSI642SDZrr_Int %xmm16, %noreg
- %xmm16 = VCVTSI642SDZrr_Int %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSI642SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSI642SSZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSI642SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSI642SSZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSI642SSZrr %xmm16, %noreg
- %xmm16 = VCVTSI642SSZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSI642SSZrr_Int %xmm16, %noreg
- %xmm16 = VCVTSI642SSZrr_Int %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSS2SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSS2SDZrm %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSS2SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- %xmm16 = VCVTSS2SDZrm_Int %xmm16, %rdi, 1, %noreg, 0, %noreg
- ; CHECK: %xmm16 = VCVTSS2SDZrr %xmm16, %noreg
- %xmm16 = VCVTSS2SDZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VCVTSS2SDZrr_Int %xmm16, %noreg
- %xmm16 = VCVTSS2SDZrr_Int %xmm16, %noreg
- ; CHECK: %rdi = VCVTSS2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
- %rdi = VCVTSS2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %rdi = VCVTSS2SI64Zrr_Int %xmm16
- %rdi = VCVTSS2SI64Zrr_Int %xmm16
- ; CHECK: %edi = VCVTSS2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
- %edi = VCVTSS2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %edi = VCVTSS2SIZrr_Int %xmm16
- %edi = VCVTSS2SIZrr_Int %xmm16
- ; CHECK: %rdi = VCVTTSD2SI64Zrm %rdi, %xmm16, 1, %noreg, 0
- %rdi = VCVTTSD2SI64Zrm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %rdi = VCVTTSD2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
- %rdi = VCVTTSD2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %rdi = VCVTTSD2SI64Zrr %xmm16
- %rdi = VCVTTSD2SI64Zrr %xmm16
- ; CHECK: %rdi = VCVTTSD2SI64Zrr_Int %xmm16
- %rdi = VCVTTSD2SI64Zrr_Int %xmm16
- ; CHECK: %edi = VCVTTSD2SIZrm %rdi, %xmm16, 1, %noreg, 0
- %edi = VCVTTSD2SIZrm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %edi = VCVTTSD2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
- %edi = VCVTTSD2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %edi = VCVTTSD2SIZrr %xmm16
- %edi = VCVTTSD2SIZrr %xmm16
- ; CHECK: %edi = VCVTTSD2SIZrr_Int %xmm16
- %edi = VCVTTSD2SIZrr_Int %xmm16
- ; CHECK: %rdi = VCVTTSS2SI64Zrm %rdi, %xmm16, 1, %noreg, 0
- %rdi = VCVTTSS2SI64Zrm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %rdi = VCVTTSS2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
- %rdi = VCVTTSS2SI64Zrm_Int %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %rdi = VCVTTSS2SI64Zrr %xmm16
- %rdi = VCVTTSS2SI64Zrr %xmm16
- ; CHECK: %rdi = VCVTTSS2SI64Zrr_Int %xmm16
- %rdi = VCVTTSS2SI64Zrr_Int %xmm16
- ; CHECK: %edi = VCVTTSS2SIZrm %rdi, %xmm16, 1, %noreg, 0
- %edi = VCVTTSS2SIZrm %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %edi = VCVTTSS2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
- %edi = VCVTTSS2SIZrm_Int %rdi, %xmm16, 1, %noreg, 0
- ; CHECK: %edi = VCVTTSS2SIZrr %xmm16
- %edi = VCVTTSS2SIZrr %xmm16
- ; CHECK: %edi = VCVTTSS2SIZrr_Int %xmm16
- %edi = VCVTTSS2SIZrr_Int %xmm16
- ; CHECK: %xmm16 = VMOV64toSDZrr %rdi
- %xmm16 = VMOV64toSDZrr %rdi
- ; CHECK: %xmm16 = VMOVDI2SSZrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VMOVDI2SSZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOVDI2SSZrr %eax
- %xmm16 = VMOVDI2SSZrr %eax
- ; CHECK: VMOVSDZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- VMOVSDZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOVSDZrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VMOVSDZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOVSDZrr %xmm16, %noreg
- %xmm16 = VMOVSDZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VMOVSDZrr_REV %xmm16, %noreg
- %xmm16 = VMOVSDZrr_REV %xmm16, %noreg
- ; CHECK: %rax = VMOVSDto64Zrr %xmm16
- %rax = VMOVSDto64Zrr %xmm16
- ; CHECK: VMOVSDto64Zmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- VMOVSDto64Zmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- ; CHECK: VMOVSSZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- VMOVSSZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOVSSZrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VMOVSSZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOVSSZrr %xmm16, %noreg
- %xmm16 = VMOVSSZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VMOVSSZrr_REV %xmm16, %noreg
- %xmm16 = VMOVSSZrr_REV %xmm16, %noreg
- ; CHECK: VMOVSS2DIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- VMOVSS2DIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %eax = VMOVSS2DIZrr %xmm16
- %eax = VMOVSS2DIZrr %xmm16
- ; CHECK: %xmm16 = VMOV64toPQIZrr %rdi
- %xmm16 = VMOV64toPQIZrr %rdi
- ; CHECK: %xmm16 = VMOV64toPQIZrm %rdi, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VMOV64toPQIZrm %rdi, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOV64toSDZrr %rdi
- %xmm16 = VMOV64toSDZrr %rdi
- ; CHECK: %xmm16 = VMOVDI2PDIZrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VMOVDI2PDIZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOVDI2PDIZrr %edi
- %xmm16 = VMOVDI2PDIZrr %edi
- ; CHECK: %xmm16 = VMOVLHPSZrr %xmm16, %noreg
- %xmm16 = VMOVLHPSZrr %xmm16, %noreg
- ; CHECK: %xmm16 = VMOVHLPSZrr %xmm16, %noreg
- %xmm16 = VMOVHLPSZrr %xmm16, %noreg
- ; CHECK: VMOVPDI2DIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- VMOVPDI2DIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %edi = VMOVPDI2DIZrr %xmm16
- %edi = VMOVPDI2DIZrr %xmm16
- ; CHECK: %xmm16 = VMOVPQI2QIZrr %xmm16
- %xmm16 = VMOVPQI2QIZrr %xmm16
- ; CHECK: VMOVPQI2QIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- VMOVPQI2QIZmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %rdi = VMOVPQIto64Zrr %xmm16
- %rdi = VMOVPQIto64Zrr %xmm16
- ; CHECK: VMOVPQIto64Zmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- VMOVPQIto64Zmr %rdi, %xmm16, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOVQI2PQIZrm %rip, %noreg, %noreg, %noreg, %noreg
- %xmm16 = VMOVQI2PQIZrm %rip, %noreg, %noreg, %noreg, %noreg
- ; CHECK: %xmm16 = VMOVZPQILo2PQIZrr %xmm16
- %xmm16 = VMOVZPQILo2PQIZrr %xmm16
- ; CHECK: VCOMISDZrm_Int %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VCOMISDZrm_Int %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VCOMISDZrr_Int %xmm16, %xmm1, implicit-def %eflags
- VCOMISDZrr_Int %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VCOMISSZrm_Int %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VCOMISSZrm_Int %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VCOMISSZrr_Int %xmm16, %xmm1, implicit-def %eflags
- VCOMISSZrr_Int %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISDZrm_Int %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VUCOMISDZrm_Int %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VUCOMISDZrr_Int %xmm16, %xmm1, implicit-def %eflags
- VUCOMISDZrr_Int %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISSZrm_Int %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VUCOMISSZrm_Int %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VUCOMISSZrr_Int %xmm16, %xmm1, implicit-def %eflags
- VUCOMISSZrr_Int %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VCOMISDZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VCOMISDZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VCOMISDZrr %xmm16, %xmm1, implicit-def %eflags
- VCOMISDZrr %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VCOMISSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VCOMISSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VCOMISSZrr %xmm16, %xmm1, implicit-def %eflags
- VCOMISSZrr %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISDZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VUCOMISDZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VUCOMISDZrr %xmm16, %xmm1, implicit-def %eflags
- VUCOMISDZrr %xmm16, %xmm1, implicit-def %eflags
- ; CHECK: VUCOMISSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- VUCOMISSZrm %xmm16, %rdi, %noreg, %noreg, %noreg, %noreg, implicit-def %eflags
- ; CHECK: VUCOMISSZrr %xmm16, %xmm1, implicit-def %eflags
- VUCOMISSZrr %xmm16, %xmm1, implicit-def %eflags
+ ; CHECK: $xmm16 = VADDSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VADDSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VADDSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VADDSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VADDSDZrr $xmm16, $xmm1
+ $xmm16 = VADDSDZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VADDSDZrr_Int $xmm16, $xmm1
+ $xmm16 = VADDSDZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VADDSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VADDSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VADDSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VADDSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VADDSSZrr $xmm16, $xmm1
+ $xmm16 = VADDSSZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VADDSSZrr_Int $xmm16, $xmm1
+ $xmm16 = VADDSSZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VDIVSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VDIVSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VDIVSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VDIVSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VDIVSDZrr $xmm16, $xmm1
+ $xmm16 = VDIVSDZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VDIVSDZrr_Int $xmm16, $xmm1
+ $xmm16 = VDIVSDZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VDIVSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VDIVSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VDIVSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VDIVSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VDIVSSZrr $xmm16, $xmm1
+ $xmm16 = VDIVSSZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VDIVSSZrr_Int $xmm16, $xmm1
+ $xmm16 = VDIVSSZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXCSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXCSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXCSDZrr $xmm16, $xmm1
+ $xmm16 = VMAXCSDZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXCSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXCSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXCSSZrr $xmm16, $xmm1
+ $xmm16 = VMAXCSSZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXSDZrr $xmm16, $xmm1
+ $xmm16 = VMAXSDZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXSDZrr_Int $xmm16, $xmm1
+ $xmm16 = VMAXSDZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMAXSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMAXSSZrr $xmm16, $xmm1
+ $xmm16 = VMAXSSZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMAXSSZrr_Int $xmm16, $xmm1
+ $xmm16 = VMAXSSZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINCSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINCSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINCSDZrr $xmm16, $xmm1
+ $xmm16 = VMINCSDZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINCSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINCSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINCSSZrr $xmm16, $xmm1
+ $xmm16 = VMINCSSZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINSDZrr $xmm16, $xmm1
+ $xmm16 = VMINSDZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINSDZrr_Int $xmm16, $xmm1
+ $xmm16 = VMINSDZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMINSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMINSSZrr $xmm16, $xmm1
+ $xmm16 = VMINSSZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMINSSZrr_Int $xmm16, $xmm1
+ $xmm16 = VMINSSZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMULSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMULSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMULSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMULSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMULSDZrr $xmm16, $xmm1
+ $xmm16 = VMULSDZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMULSDZrr_Int $xmm16, $xmm1
+ $xmm16 = VMULSDZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMULSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMULSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMULSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VMULSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VMULSSZrr $xmm16, $xmm1
+ $xmm16 = VMULSSZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VMULSSZrr_Int $xmm16, $xmm1
+ $xmm16 = VMULSSZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VSUBSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VSUBSDZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VSUBSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VSUBSDZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VSUBSDZrr $xmm16, $xmm1
+ $xmm16 = VSUBSDZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VSUBSDZrr_Int $xmm16, $xmm1
+ $xmm16 = VSUBSDZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VSUBSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VSUBSSZrm $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VSUBSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ $xmm16 = VSUBSSZrm_Int $xmm16, $rip, 1, $noreg, $rax, $noreg
+ ; CHECK: $xmm16 = VSUBSSZrr $xmm16, $xmm1
+ $xmm16 = VSUBSSZrr $xmm16, $xmm1
+ ; CHECK: $xmm16 = VSUBSSZrr_Int $xmm16, $xmm1
+ $xmm16 = VSUBSSZrr_Int $xmm16, $xmm1
+ ; CHECK: $xmm16 = VFMADD132SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD132SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD132SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD132SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD132SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD132SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD132SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD132SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD132SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD132SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD132SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD132SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD132SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD132SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD132SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD132SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD213SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD213SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD213SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD213SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD213SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD213SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD213SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD213SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD213SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD213SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD213SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD213SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD213SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD213SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD213SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD213SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD231SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD231SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD231SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD231SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD231SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD231SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD231SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD231SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD231SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD231SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD231SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMADD231SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMADD231SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD231SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMADD231SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMADD231SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB132SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB132SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB132SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB132SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB132SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB132SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB132SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB132SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB132SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB132SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB132SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB132SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB132SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB132SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB132SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB132SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB213SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB213SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB213SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB213SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB213SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB213SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB213SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB213SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB213SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB213SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB213SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB213SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB213SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB213SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB213SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB213SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB231SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB231SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB231SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB231SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB231SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB231SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB231SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB231SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB231SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB231SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB231SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFMSUB231SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFMSUB231SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB231SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFMSUB231SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFMSUB231SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD132SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD132SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD132SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD132SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD132SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD132SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD132SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD132SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD132SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD132SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD132SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD132SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD132SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD132SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD132SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD132SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD213SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD213SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD213SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD213SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD213SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD213SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD213SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD213SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD213SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD213SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD213SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD213SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD213SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD213SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD213SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD213SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD231SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD231SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD231SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD231SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD231SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD231SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD231SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD231SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD231SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD231SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD231SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMADD231SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMADD231SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD231SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMADD231SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMADD231SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB132SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB132SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB132SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB132SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB132SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB132SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB132SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB132SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB132SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB132SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB132SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB132SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB132SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB132SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB132SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB132SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB213SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB213SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB213SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB213SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB213SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB213SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB213SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB213SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB213SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB213SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB213SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB213SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB213SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB213SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB213SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB213SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB231SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB231SDZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB231SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB231SDZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB231SDZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB231SDZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB231SDZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB231SDZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB231SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB231SSZm $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB231SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ $xmm16 = VFNMSUB231SSZm_Int $xmm16, $xmm16, $rsi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VFNMSUB231SSZr $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB231SSZr $xmm16, $xmm1, $xmm2
+ ; CHECK: $xmm16 = VFNMSUB231SSZr_Int $xmm16, $xmm1, $xmm2
+ $xmm16 = VFNMSUB231SSZr_Int $xmm16, $xmm1, $xmm2
+ ; CHECK: VPEXTRBZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
+ VPEXTRBZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
+ ; CHECK: $eax = VPEXTRBZrr $xmm16, 1
+ $eax = VPEXTRBZrr $xmm16, 1
+ ; CHECK: VPEXTRDZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
+ VPEXTRDZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
+ ; CHECK: $eax = VPEXTRDZrr $xmm16, 1
+ $eax = VPEXTRDZrr $xmm16, 1
+ ; CHECK: VPEXTRQZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
+ VPEXTRQZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
+ ; CHECK: $rax = VPEXTRQZrr $xmm16, 1
+ $rax = VPEXTRQZrr $xmm16, 1
+ ; CHECK: VPEXTRWZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
+ VPEXTRWZmr $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
+ ; CHECK: $eax = VPEXTRWZrr $xmm16, 1
+ $eax = VPEXTRWZrr $xmm16, 1
+ ; CHECK: $eax = VPEXTRWZrr_REV $xmm16, 1
+ $eax = VPEXTRWZrr_REV $xmm16, 1
+ ; CHECK: $xmm16 = VPINSRBZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
+ $xmm16 = VPINSRBZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
+ ; CHECK: $xmm16 = VPINSRBZrr $xmm16, $edi, 5
+ $xmm16 = VPINSRBZrr $xmm16, $edi, 5
+ ; CHECK: $xmm16 = VPINSRDZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
+ $xmm16 = VPINSRDZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
+ ; CHECK: $xmm16 = VPINSRDZrr $xmm16, $edi, 5
+ $xmm16 = VPINSRDZrr $xmm16, $edi, 5
+ ; CHECK: $xmm16 = VPINSRQZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
+ $xmm16 = VPINSRQZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
+ ; CHECK: $xmm16 = VPINSRQZrr $xmm16, $rdi, 5
+ $xmm16 = VPINSRQZrr $xmm16, $rdi, 5
+ ; CHECK: $xmm16 = VPINSRWZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
+ $xmm16 = VPINSRWZrm $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
+ ; CHECK: $xmm16 = VPINSRWZrr $xmm16, $edi, 5
+ $xmm16 = VPINSRWZrr $xmm16, $edi, 5
+ ; CHECK: $xmm16 = VSQRTSDZm $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VSQRTSDZm $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VSQRTSDZm_Int $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VSQRTSDZm_Int $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VSQRTSDZr $xmm16, $noreg
+ $xmm16 = VSQRTSDZr $xmm16, $noreg
+ ; CHECK: $xmm16 = VSQRTSDZr_Int $xmm16, $noreg
+ $xmm16 = VSQRTSDZr_Int $xmm16, $noreg
+ ; CHECK: $xmm16 = VSQRTSSZm $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VSQRTSSZm $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VSQRTSSZm_Int $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VSQRTSSZm_Int $xmm16, $noreg, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VSQRTSSZr $xmm16, $noreg
+ $xmm16 = VSQRTSSZr $xmm16, $noreg
+ ; CHECK: $xmm16 = VSQRTSSZr_Int $xmm16, $noreg
+ $xmm16 = VSQRTSSZr_Int $xmm16, $noreg
+ ; CHECK: $rdi = VCVTSD2SI64Zrm_Int $rdi, $xmm16, 1, $noreg, 0
+ $rdi = VCVTSD2SI64Zrm_Int $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $rdi = VCVTSD2SI64Zrr_Int $xmm16
+ $rdi = VCVTSD2SI64Zrr_Int $xmm16
+ ; CHECK: $edi = VCVTSD2SIZrm_Int $rdi, $xmm16, 1, $noreg, 0
+ $edi = VCVTSD2SIZrm_Int $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $edi = VCVTSD2SIZrr_Int $xmm16
+ $edi = VCVTSD2SIZrr_Int $xmm16
+ ; CHECK: $xmm16 = VCVTSD2SSZrm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VCVTSD2SSZrm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VCVTSD2SSZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VCVTSD2SSZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VCVTSD2SSZrr $xmm16, $noreg
+ $xmm16 = VCVTSD2SSZrr $xmm16, $noreg
+ ; CHECK: $xmm16 = VCVTSD2SSZrr_Int $xmm16, $noreg
+ $xmm16 = VCVTSD2SSZrr_Int $xmm16, $noreg
+ ; CHECK: $xmm16 = VCVTSI2SDZrm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VCVTSI2SDZrm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VCVTSI2SDZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VCVTSI2SDZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VCVTSI2SDZrr $xmm16, $noreg
+ $xmm16 = VCVTSI2SDZrr $xmm16, $noreg
+ ; CHECK: $xmm16 = VCVTSI2SDZrr_Int $xmm16, $noreg
+ $xmm16 = VCVTSI2SDZrr_Int $xmm16, $noreg
+ ; CHECK: $xmm16 = VCVTSI2SSZrm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VCVTSI2SSZrm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VCVTSI2SSZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VCVTSI2SSZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VCVTSI2SSZrr $xmm16, $noreg
+ $xmm16 = VCVTSI2SSZrr $xmm16, $noreg
+ ; CHECK: $xmm16 = VCVTSI2SSZrr_Int $xmm16, $noreg
+ $xmm16 = VCVTSI2SSZrr_Int $xmm16, $noreg
+ ; CHECK: $xmm16 = VCVTSI642SDZrm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VCVTSI642SDZrm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VCVTSI642SDZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VCVTSI642SDZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VCVTSI642SDZrr $xmm16, $noreg
+ $xmm16 = VCVTSI642SDZrr $xmm16, $noreg
+ ; CHECK: $xmm16 = VCVTSI642SDZrr_Int $xmm16, $noreg
+ $xmm16 = VCVTSI642SDZrr_Int $xmm16, $noreg
+ ; CHECK: $xmm16 = VCVTSI642SSZrm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VCVTSI642SSZrm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VCVTSI642SSZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VCVTSI642SSZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VCVTSI642SSZrr $xmm16, $noreg
+ $xmm16 = VCVTSI642SSZrr $xmm16, $noreg
+ ; CHECK: $xmm16 = VCVTSI642SSZrr_Int $xmm16, $noreg
+ $xmm16 = VCVTSI642SSZrr_Int $xmm16, $noreg
+ ; CHECK: $xmm16 = VCVTSS2SDZrm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VCVTSS2SDZrm $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VCVTSS2SDZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg
+ $xmm16 = VCVTSS2SDZrm_Int $xmm16, $rdi, 1, $noreg, 0, $noreg
+ ; CHECK: $xmm16 = VCVTSS2SDZrr $xmm16, $noreg
+ $xmm16 = VCVTSS2SDZrr $xmm16, $noreg
+ ; CHECK: $xmm16 = VCVTSS2SDZrr_Int $xmm16, $noreg
+ $xmm16 = VCVTSS2SDZrr_Int $xmm16, $noreg
+ ; CHECK: $rdi = VCVTSS2SI64Zrm_Int $rdi, $xmm16, 1, $noreg, 0
+ $rdi = VCVTSS2SI64Zrm_Int $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $rdi = VCVTSS2SI64Zrr_Int $xmm16
+ $rdi = VCVTSS2SI64Zrr_Int $xmm16
+ ; CHECK: $edi = VCVTSS2SIZrm_Int $rdi, $xmm16, 1, $noreg, 0
+ $edi = VCVTSS2SIZrm_Int $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $edi = VCVTSS2SIZrr_Int $xmm16
+ $edi = VCVTSS2SIZrr_Int $xmm16
+ ; CHECK: $rdi = VCVTTSD2SI64Zrm $rdi, $xmm16, 1, $noreg, 0
+ $rdi = VCVTTSD2SI64Zrm $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $rdi = VCVTTSD2SI64Zrm_Int $rdi, $xmm16, 1, $noreg, 0
+ $rdi = VCVTTSD2SI64Zrm_Int $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $rdi = VCVTTSD2SI64Zrr $xmm16
+ $rdi = VCVTTSD2SI64Zrr $xmm16
+ ; CHECK: $rdi = VCVTTSD2SI64Zrr_Int $xmm16
+ $rdi = VCVTTSD2SI64Zrr_Int $xmm16
+ ; CHECK: $edi = VCVTTSD2SIZrm $rdi, $xmm16, 1, $noreg, 0
+ $edi = VCVTTSD2SIZrm $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $edi = VCVTTSD2SIZrm_Int $rdi, $xmm16, 1, $noreg, 0
+ $edi = VCVTTSD2SIZrm_Int $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $edi = VCVTTSD2SIZrr $xmm16
+ $edi = VCVTTSD2SIZrr $xmm16
+ ; CHECK: $edi = VCVTTSD2SIZrr_Int $xmm16
+ $edi = VCVTTSD2SIZrr_Int $xmm16
+ ; CHECK: $rdi = VCVTTSS2SI64Zrm $rdi, $xmm16, 1, $noreg, 0
+ $rdi = VCVTTSS2SI64Zrm $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $rdi = VCVTTSS2SI64Zrm_Int $rdi, $xmm16, 1, $noreg, 0
+ $rdi = VCVTTSS2SI64Zrm_Int $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $rdi = VCVTTSS2SI64Zrr $xmm16
+ $rdi = VCVTTSS2SI64Zrr $xmm16
+ ; CHECK: $rdi = VCVTTSS2SI64Zrr_Int $xmm16
+ $rdi = VCVTTSS2SI64Zrr_Int $xmm16
+ ; CHECK: $edi = VCVTTSS2SIZrm $rdi, $xmm16, 1, $noreg, 0
+ $edi = VCVTTSS2SIZrm $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $edi = VCVTTSS2SIZrm_Int $rdi, $xmm16, 1, $noreg, 0
+ $edi = VCVTTSS2SIZrm_Int $rdi, $xmm16, 1, $noreg, 0
+ ; CHECK: $edi = VCVTTSS2SIZrr $xmm16
+ $edi = VCVTTSS2SIZrr $xmm16
+ ; CHECK: $edi = VCVTTSS2SIZrr_Int $xmm16
+ $edi = VCVTTSS2SIZrr_Int $xmm16
+ ; CHECK: $xmm16 = VMOV64toSDZrr $rdi
+ $xmm16 = VMOV64toSDZrr $rdi
+ ; CHECK: $xmm16 = VMOVDI2SSZrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VMOVDI2SSZrm $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VMOVDI2SSZrr $eax
+ $xmm16 = VMOVDI2SSZrr $eax
+ ; CHECK: VMOVSDZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg
+ VMOVSDZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VMOVSDZrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VMOVSDZrm $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VMOVSDZrr $xmm16, $noreg
+ $xmm16 = VMOVSDZrr $xmm16, $noreg
+ ; CHECK: $xmm16 = VMOVSDZrr_REV $xmm16, $noreg
+ $xmm16 = VMOVSDZrr_REV $xmm16, $noreg
+ ; CHECK: $rax = VMOVSDto64Zrr $xmm16
+ $rax = VMOVSDto64Zrr $xmm16
+ ; CHECK: VMOVSDto64Zmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg
+ VMOVSDto64Zmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: VMOVSSZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg
+ VMOVSSZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VMOVSSZrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VMOVSSZrm $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VMOVSSZrr $xmm16, $noreg
+ $xmm16 = VMOVSSZrr $xmm16, $noreg
+ ; CHECK: $xmm16 = VMOVSSZrr_REV $xmm16, $noreg
+ $xmm16 = VMOVSSZrr_REV $xmm16, $noreg
+ ; CHECK: VMOVSS2DIZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg
+ VMOVSS2DIZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $eax = VMOVSS2DIZrr $xmm16
+ $eax = VMOVSS2DIZrr $xmm16
+ ; CHECK: $xmm16 = VMOV64toPQIZrr $rdi
+ $xmm16 = VMOV64toPQIZrr $rdi
+ ; CHECK: $xmm16 = VMOV64toPQIZrm $rdi, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VMOV64toPQIZrm $rdi, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VMOV64toSDZrr $rdi
+ $xmm16 = VMOV64toSDZrr $rdi
+ ; CHECK: $xmm16 = VMOVDI2PDIZrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VMOVDI2PDIZrm $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VMOVDI2PDIZrr $edi
+ $xmm16 = VMOVDI2PDIZrr $edi
+ ; CHECK: $xmm16 = VMOVLHPSZrr $xmm16, $noreg
+ $xmm16 = VMOVLHPSZrr $xmm16, $noreg
+ ; CHECK: $xmm16 = VMOVHLPSZrr $xmm16, $noreg
+ $xmm16 = VMOVHLPSZrr $xmm16, $noreg
+ ; CHECK: VMOVPDI2DIZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg
+ VMOVPDI2DIZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $edi = VMOVPDI2DIZrr $xmm16
+ $edi = VMOVPDI2DIZrr $xmm16
+ ; CHECK: $xmm16 = VMOVPQI2QIZrr $xmm16
+ $xmm16 = VMOVPQI2QIZrr $xmm16
+ ; CHECK: VMOVPQI2QIZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg
+ VMOVPQI2QIZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $rdi = VMOVPQIto64Zrr $xmm16
+ $rdi = VMOVPQIto64Zrr $xmm16
+ ; CHECK: VMOVPQIto64Zmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg
+ VMOVPQIto64Zmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VMOVQI2PQIZrm $rip, $noreg, $noreg, $noreg, $noreg
+ $xmm16 = VMOVQI2PQIZrm $rip, $noreg, $noreg, $noreg, $noreg
+ ; CHECK: $xmm16 = VMOVZPQILo2PQIZrr $xmm16
+ $xmm16 = VMOVZPQILo2PQIZrr $xmm16
+ ; CHECK: VCOMISDZrm_Int $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VCOMISDZrm_Int $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VCOMISDZrr_Int $xmm16, $xmm1, implicit-def $eflags
+ VCOMISDZrr_Int $xmm16, $xmm1, implicit-def $eflags
+ ; CHECK: VCOMISSZrm_Int $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VCOMISSZrm_Int $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VCOMISSZrr_Int $xmm16, $xmm1, implicit-def $eflags
+ VCOMISSZrr_Int $xmm16, $xmm1, implicit-def $eflags
+ ; CHECK: VUCOMISDZrm_Int $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VUCOMISDZrm_Int $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VUCOMISDZrr_Int $xmm16, $xmm1, implicit-def $eflags
+ VUCOMISDZrr_Int $xmm16, $xmm1, implicit-def $eflags
+ ; CHECK: VUCOMISSZrm_Int $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VUCOMISSZrm_Int $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VUCOMISSZrr_Int $xmm16, $xmm1, implicit-def $eflags
+ VUCOMISSZrr_Int $xmm16, $xmm1, implicit-def $eflags
+ ; CHECK: VCOMISDZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VCOMISDZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VCOMISDZrr $xmm16, $xmm1, implicit-def $eflags
+ VCOMISDZrr $xmm16, $xmm1, implicit-def $eflags
+ ; CHECK: VCOMISSZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VCOMISSZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VCOMISSZrr $xmm16, $xmm1, implicit-def $eflags
+ VCOMISSZrr $xmm16, $xmm1, implicit-def $eflags
+ ; CHECK: VUCOMISDZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VUCOMISDZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VUCOMISDZrr $xmm16, $xmm1, implicit-def $eflags
+ VUCOMISDZrr $xmm16, $xmm1, implicit-def $eflags
+ ; CHECK: VUCOMISSZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ VUCOMISSZrm $xmm16, $rdi, $noreg, $noreg, $noreg, $noreg, implicit-def $eflags
+ ; CHECK: VUCOMISSZrr $xmm16, $xmm1, implicit-def $eflags
+ VUCOMISSZrr $xmm16, $xmm1, implicit-def $eflags
- RET 0, %zmm0, %zmm1
+ RET 0, $zmm0, $zmm1
...
diff --git a/llvm/test/CodeGen/X86/expand-vr64-gr64-copy.mir b/llvm/test/CodeGen/X86/expand-vr64-gr64-copy.mir
index ceb7d39..59e767d 100644
--- a/llvm/test/CodeGen/X86/expand-vr64-gr64-copy.mir
+++ b/llvm/test/CodeGen/X86/expand-vr64-gr64-copy.mir
@@ -20,17 +20,17 @@
tracksRegLiveness: true
body: |
bb.0.entry:
- liveins: %xmm0
+ liveins: $xmm0
- %xmm0 = PSHUFDri killed %xmm0, -24
- MOVPQI2QImr %rsp, 1, %noreg, -8, %noreg, killed %xmm0
- %mm0 = PSWAPDrm %rsp, 1, %noreg, -8, %noreg
- ; CHECK: %rax = MMX_MOVD64from64rr %mm0
- ; CHECK-NEXT: %mm0 = MMX_MOVD64to64rr %rax
- %rax = COPY %mm0
- %mm0 = COPY %rax
- MMX_MOVQ64mr %rsp, 1, %noreg, -16, %noreg, killed %mm0
- %xmm0 = MOVQI2PQIrm %rsp, 1, %noreg, -16, %noreg
- %xmm0 = PSHUFDri killed %xmm0, -44
- RETQ %xmm0
+ $xmm0 = PSHUFDri killed $xmm0, -24
+ MOVPQI2QImr $rsp, 1, $noreg, -8, $noreg, killed $xmm0
+ $mm0 = PSWAPDrm $rsp, 1, $noreg, -8, $noreg
+ ; CHECK: $rax = MMX_MOVD64from64rr $mm0
+ ; CHECK-NEXT: $mm0 = MMX_MOVD64to64rr $rax
+ $rax = COPY $mm0
+ $mm0 = COPY $rax
+ MMX_MOVQ64mr $rsp, 1, $noreg, -16, $noreg, killed $mm0
+ $xmm0 = MOVQI2PQIrm $rsp, 1, $noreg, -16, $noreg
+ $xmm0 = PSHUFDri killed $xmm0, -44
+ RETQ $xmm0
...
diff --git a/llvm/test/CodeGen/X86/extractelement-index.ll b/llvm/test/CodeGen/X86/extractelement-index.ll
index 66bdfb8..a34ea7f 100644
--- a/llvm/test/CodeGen/X86/extractelement-index.ll
+++ b/llvm/test/CodeGen/X86/extractelement-index.ll
@@ -13,19 +13,19 @@
; SSE2: # %bb.0:
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: shrl $8, %eax
-; SSE2-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v16i8_1:
; SSE41: # %bb.0:
; SSE41-NEXT: pextrb $1, %xmm0, %eax
-; SSE41-NEXT: # kill: def %al killed %al killed %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v16i8_1:
; AVX: # %bb.0:
; AVX-NEXT: vpextrb $1, %xmm0, %eax
-; AVX-NEXT: # kill: def %al killed %al killed %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
; AVX-NEXT: retq
%b = extractelement <16 x i8> %a, i256 1
ret i8 %b
@@ -36,19 +36,19 @@
; SSE2: # %bb.0:
; SSE2-NEXT: pextrw $5, %xmm0, %eax
; SSE2-NEXT: shrl $8, %eax
-; SSE2-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v16i8_11:
; SSE41: # %bb.0:
; SSE41-NEXT: pextrb $11, %xmm0, %eax
-; SSE41-NEXT: # kill: def %al killed %al killed %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v16i8_11:
; AVX: # %bb.0:
; AVX-NEXT: vpextrb $11, %xmm0, %eax
-; AVX-NEXT: # kill: def %al killed %al killed %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
; AVX-NEXT: retq
%b = extractelement <16 x i8> %a, i256 11
ret i8 %b
@@ -58,19 +58,19 @@
; SSE2-LABEL: extractelement_v16i8_14:
; SSE2: # %bb.0:
; SSE2-NEXT: pextrw $7, %xmm0, %eax
-; SSE2-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v16i8_14:
; SSE41: # %bb.0:
; SSE41-NEXT: pextrb $14, %xmm0, %eax
-; SSE41-NEXT: # kill: def %al killed %al killed %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v16i8_14:
; AVX: # %bb.0:
; AVX-NEXT: vpextrb $14, %xmm0, %eax
-; AVX-NEXT: # kill: def %al killed %al killed %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
; AVX-NEXT: retq
%b = extractelement <16 x i8> %a, i256 14
ret i8 %b
@@ -81,19 +81,19 @@
; SSE2: # %bb.0:
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: shrl $8, %eax
-; SSE2-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v32i8_1:
; SSE41: # %bb.0:
; SSE41-NEXT: pextrb $1, %xmm0, %eax
-; SSE41-NEXT: # kill: def %al killed %al killed %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v32i8_1:
; AVX: # %bb.0:
; AVX-NEXT: vpextrb $1, %xmm0, %eax
-; AVX-NEXT: # kill: def %al killed %al killed %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%b = extractelement <32 x i8> %a, i256 1
@@ -105,20 +105,20 @@
; SSE2: # %bb.0:
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: shrl $8, %eax
-; SSE2-NEXT: # kill: def %al killed %al killed %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v32i8_17:
; SSE41: # %bb.0:
; SSE41-NEXT: pextrb $1, %xmm1, %eax
-; SSE41-NEXT: # kill: def %al killed %al killed %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: extractelement_v32i8_17:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpextrb $1, %xmm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -126,7 +126,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpextrb $1, %xmm0, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
%b = extractelement <32 x i8> %a, i256 17
@@ -137,13 +137,13 @@
; SSE-LABEL: extractelement_v8i16_0:
; SSE: # %bb.0:
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v8i16_0:
; AVX: # %bb.0:
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
%b = extractelement <8 x i16> %a, i256 0
ret i16 %b
@@ -153,13 +153,13 @@
; SSE-LABEL: extractelement_v8i16_3:
; SSE: # %bb.0:
; SSE-NEXT: pextrw $3, %xmm0, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v8i16_3:
; AVX: # %bb.0:
; AVX-NEXT: vpextrw $3, %xmm0, %eax
-; AVX-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
%b = extractelement <8 x i16> %a, i256 3
ret i16 %b
@@ -169,13 +169,13 @@
; SSE-LABEL: extractelement_v16i16_0:
; SSE: # %bb.0:
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v16i16_0:
; AVX: # %bb.0:
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%b = extractelement <16 x i16> %a, i256 0
@@ -186,14 +186,14 @@
; SSE-LABEL: extractelement_v16i16_13:
; SSE: # %bb.0:
; SSE-NEXT: pextrw $5, %xmm1, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: extractelement_v16i16_13:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpextrw $5, %xmm0, %eax
-; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -201,7 +201,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpextrw $5, %xmm0, %eax
-; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
%b = extractelement <16 x i16> %a, i256 13
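The '# kill:' comments rewritten throughout this file are the assembly printer's rendering of MIR KILL markers that narrow a value to a sub-register: 'def $al killed $al killed $eax' means the result lives on only in $al while the remaining bits of $eax are dead from this point, and no machine code is emitted for it. In MIR such a marker looks roughly like the following sketch, modeled on the fixup-bw-inst.mir bodies updated later in this patch:

    ; Only the low byte survives; the wider $eax ends its live range here.
    $al = KILL $al, implicit killed $eax
    RETQ $al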
diff --git a/llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
index 47cd1ba..d403fee 100644
--- a/llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
@@ -43,7 +43,7 @@
; X32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X32-NEXT: vcvtps2ph $0, %xmm0, %xmm0
; X32-NEXT: vmovd %xmm0, %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test_cvtss_sh:
@@ -52,7 +52,7 @@
; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-NEXT: vcvtps2ph $0, %xmm0, %xmm0
; X64-NEXT: vmovd %xmm0, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%ins0 = insertelement <4 x float> undef, float %a0, i32 0
%ins1 = insertelement <4 x float> %ins0, float 0.000000e+00, i32 1
diff --git a/llvm/test/CodeGen/X86/fast-isel-cmp.ll b/llvm/test/CodeGen/X86/fast-isel-cmp.ll
index 355e6eb..e850e15 100644
--- a/llvm/test/CodeGen/X86/fast-isel-cmp.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-cmp.ll
@@ -10,7 +10,7 @@
; SDAG-NEXT: cmpeqss %xmm1, %xmm0
; SDAG-NEXT: movd %xmm0, %eax
; SDAG-NEXT: andl $1, %eax
-; SDAG-NEXT: ## kill: def %al killed %al killed %eax
+; SDAG-NEXT: ## kill: def $al killed $al killed $eax
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_oeq:
@@ -354,7 +354,7 @@
; SDAG-NEXT: cmpneqss %xmm1, %xmm0
; SDAG-NEXT: movd %xmm0, %eax
; SDAG-NEXT: andl $1, %eax
-; SDAG-NEXT: ## kill: def %al killed %al killed %eax
+; SDAG-NEXT: ## kill: def $al killed $al killed $eax
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_une:
@@ -594,7 +594,7 @@
; SDAG-NEXT: cmpeqss %xmm0, %xmm1
; SDAG-NEXT: movd %xmm1, %eax
; SDAG-NEXT: andl $1, %eax
-; SDAG-NEXT: ## kill: def %al killed %al killed %eax
+; SDAG-NEXT: ## kill: def $al killed $al killed $eax
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_oeq3:
@@ -1249,7 +1249,7 @@
; SDAG-NEXT: cmpneqss %xmm0, %xmm1
; SDAG-NEXT: movd %xmm1, %eax
; SDAG-NEXT: andl $1, %eax
-; SDAG-NEXT: ## kill: def %al killed %al killed %eax
+; SDAG-NEXT: ## kill: def $al killed $al killed $eax
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_une3:
diff --git a/llvm/test/CodeGen/X86/fast-isel-nontemporal.ll b/llvm/test/CodeGen/X86/fast-isel-nontemporal.ll
index 79e9630..db1ebfe 100644
--- a/llvm/test/CodeGen/X86/fast-isel-nontemporal.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-nontemporal.ll
@@ -547,7 +547,7 @@
; AVX1-LABEL: test_load_nt8xfloat:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -589,7 +589,7 @@
; AVX1-LABEL: test_load_nt4xdouble:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -631,7 +631,7 @@
; AVX1-LABEL: test_load_nt32xi8:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -673,7 +673,7 @@
; AVX1-LABEL: test_load_nt16xi16:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -715,7 +715,7 @@
; AVX1-LABEL: test_load_nt8xi32:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -757,7 +757,7 @@
; AVX1-LABEL: test_load_nt4xi64:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -997,12 +997,12 @@
; AVX1-LABEL: test_load_nt16xfloat:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -1051,12 +1051,12 @@
; AVX1-LABEL: test_load_nt8xdouble:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -1105,12 +1105,12 @@
; AVX1-LABEL: test_load_nt64xi8:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -1171,12 +1171,12 @@
; AVX1-LABEL: test_load_nt32xi16:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -1237,12 +1237,12 @@
; AVX1-LABEL: test_load_nt16xi32:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -1291,12 +1291,12 @@
; AVX1-LABEL: test_load_nt8xi64:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %ymm1
+; AVX1-NEXT: # implicit-def: $ymm1
; AVX1-NEXT: vmovaps %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
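The '# implicit-def: $ymm1' comments correspond to IMPLICIT_DEF instructions. FastISel builds each 256-bit value out of two 128-bit nontemporal loads, so it first declares the full $ymm1 live with undefined contents before writing only its low half; otherwise the partial write would not pass the verifier. A sketch of the underlying MIR, where a plain COPY stands in for the vmovaps (an assumption for illustration, not taken from this patch):

    ; Declare all 256 bits of $ymm1 without defining their contents ...
    $ymm1 = IMPLICIT_DEF
    ; ... then define the low 128 bits; the high half stays undef
    ; until VINSERTF128 fills it in.
    $xmm1 = COPY $xmm0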
diff --git a/llvm/test/CodeGen/X86/fast-isel-sext-zext.ll b/llvm/test/CodeGen/X86/fast-isel-sext-zext.ll
index 92344a5c..5e54c98 100644
--- a/llvm/test/CodeGen/X86/fast-isel-sext-zext.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-sext-zext.ll
@@ -30,7 +30,7 @@
; X32-NEXT: andb $1, %al
; X32-NEXT: negb %al
; X32-NEXT: movsbl %al, %eax
-; X32-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32-NEXT: ## kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
@@ -39,7 +39,7 @@
; X64-NEXT: andb $1, %dil
; X64-NEXT: negb %dil
; X64-NEXT: movsbl %dil, %eax
-; X64-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
%z = trunc i16 %x to i1
@@ -116,7 +116,7 @@
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andb $1, %al
; X32-NEXT: movzbl %al, %eax
-; X32-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32-NEXT: ## kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
@@ -124,7 +124,7 @@
; X64: ## %bb.0:
; X64-NEXT: andb $1, %dil
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
%z = trunc i16 %x to i1
@@ -176,14 +176,14 @@
; X32-LABEL: test9:
; X32: ## %bb.0:
; X32-NEXT: movsbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32-NEXT: ## kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-LABEL: test9:
; X64: ## %bb.0:
; X64-NEXT: movsbl %dil, %eax
-; X64-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
%u = sext i8 %x to i16
@@ -228,14 +228,14 @@
; X32-LABEL: test12:
; X32: ## %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32-NEXT: ## kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-LABEL: test12:
; X64: ## %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
%u = zext i8 %x to i16
diff --git a/llvm/test/CodeGen/X86/fast-isel-shift.ll b/llvm/test/CodeGen/X86/fast-isel-shift.ll
index 5d416e1..3699b7ba 100644
--- a/llvm/test/CodeGen/X86/fast-isel-shift.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-shift.ll
@@ -16,7 +16,7 @@
; CHECK-LABEL: shl_i16:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: def %cl killed %cx
+; CHECK-NEXT: ## kill: def $cl killed $cx
; CHECK-NEXT: shlw %cl, %di
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -28,7 +28,7 @@
; CHECK-LABEL: shl_i32:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: def %cl killed %ecx
+; CHECK-NEXT: ## kill: def $cl killed $ecx
; CHECK-NEXT: shll %cl, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -40,7 +40,7 @@
; CHECK-LABEL: shl_i64:
; CHECK: ## %bb.0:
; CHECK-NEXT: movq %rsi, %rcx
-; CHECK-NEXT: ## kill: def %cl killed %rcx
+; CHECK-NEXT: ## kill: def $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
@@ -63,7 +63,7 @@
; CHECK-LABEL: lshr_i16:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: def %cl killed %cx
+; CHECK-NEXT: ## kill: def $cl killed $cx
; CHECK-NEXT: shrw %cl, %di
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -75,7 +75,7 @@
; CHECK-LABEL: lshr_i32:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: def %cl killed %ecx
+; CHECK-NEXT: ## kill: def $cl killed $ecx
; CHECK-NEXT: shrl %cl, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -87,7 +87,7 @@
; CHECK-LABEL: lshr_i64:
; CHECK: ## %bb.0:
; CHECK-NEXT: movq %rsi, %rcx
-; CHECK-NEXT: ## kill: def %cl killed %rcx
+; CHECK-NEXT: ## kill: def $cl killed $rcx
; CHECK-NEXT: shrq %cl, %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
@@ -110,7 +110,7 @@
; CHECK-LABEL: ashr_i16:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: def %cl killed %cx
+; CHECK-NEXT: ## kill: def $cl killed $cx
; CHECK-NEXT: sarw %cl, %di
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -122,7 +122,7 @@
; CHECK-LABEL: ashr_i32:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: def %cl killed %ecx
+; CHECK-NEXT: ## kill: def $cl killed $ecx
; CHECK-NEXT: sarl %cl, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -134,7 +134,7 @@
; CHECK-LABEL: ashr_i64:
; CHECK: ## %bb.0:
; CHECK-NEXT: movq %rsi, %rcx
-; CHECK-NEXT: ## kill: def %cl killed %rcx
+; CHECK-NEXT: ## kill: def $cl killed $rcx
; CHECK-NEXT: sarq %cl, %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
@@ -155,9 +155,9 @@
define i16 @shl_imm1_i16(i16 %a) {
; CHECK-LABEL: shl_imm1_i16:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: leal (,%rdi,2), %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%c = shl i16 %a, 1
ret i16 %c
@@ -166,7 +166,7 @@
define i32 @shl_imm1_i32(i32 %a) {
; CHECK-LABEL: shl_imm1_i32:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: leal (,%rdi,2), %eax
; CHECK-NEXT: retq
%c = shl i32 %a, 1
diff --git a/llvm/test/CodeGen/X86/fixup-bw-copy.ll b/llvm/test/CodeGen/X86/fixup-bw-copy.ll
index dead278..4f6d2e3 100644
--- a/llvm/test/CodeGen/X86/fixup-bw-copy.ll
+++ b/llvm/test/CodeGen/X86/fixup-bw-copy.ll
@@ -54,7 +54,7 @@
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shrl $8, %eax
; X64-NEXT: addb %dil, %al
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
; X32-LABEL: test_movb_hreg:
diff --git a/llvm/test/CodeGen/X86/fixup-bw-copy.mir b/llvm/test/CodeGen/X86/fixup-bw-copy.mir
index bbb60ad..d9dc5f2 100644
--- a/llvm/test/CodeGen/X86/fixup-bw-copy.mir
+++ b/llvm/test/CodeGen/X86/fixup-bw-copy.mir
@@ -40,14 +40,14 @@
name: test_movb_killed
tracksRegLiveness: true
liveins:
- - { reg: '%edi' }
+ - { reg: '$edi' }
body: |
bb.0 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- ; CHECK: %eax = MOV32rr undef %edi, implicit %dil
- %al = MOV8rr killed %dil
- RETQ killed %al
+ ; CHECK: $eax = MOV32rr undef $edi, implicit $dil
+ $al = MOV8rr killed $dil
+ RETQ killed $al
...
@@ -55,14 +55,14 @@
name: test_movb_impuse
tracksRegLiveness: true
liveins:
- - { reg: '%edi' }
+ - { reg: '$edi' }
body: |
bb.0 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- ; CHECK: %eax = MOV32rr undef %edi, implicit %dil
- %al = MOV8rr %dil, implicit %edi
- RETQ killed %al
+ ; CHECK: $eax = MOV32rr undef $edi, implicit $dil
+ $al = MOV8rr $dil, implicit $edi
+ RETQ killed $al
...
@@ -70,14 +70,14 @@
name: test_movb_impdef_gr64
tracksRegLiveness: true
liveins:
- - { reg: '%edi' }
+ - { reg: '$edi' }
body: |
bb.0 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- ; CHECK: %eax = MOV32rr undef %edi, implicit %dil, implicit-def %rax
- %al = MOV8rr %dil, implicit-def %rax
- RETQ killed %al
+ ; CHECK: $eax = MOV32rr undef $edi, implicit $dil, implicit-def $rax
+ $al = MOV8rr $dil, implicit-def $rax
+ RETQ killed $al
...
@@ -85,14 +85,14 @@
name: test_movb_impdef_gr32
tracksRegLiveness: true
liveins:
- - { reg: '%edi' }
+ - { reg: '$edi' }
body: |
bb.0 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- ; CHECK: %eax = MOV32rr undef %edi, implicit %dil
- %al = MOV8rr %dil, implicit-def %eax
- RETQ killed %al
+ ; CHECK: $eax = MOV32rr undef $edi, implicit $dil
+ $al = MOV8rr $dil, implicit-def $eax
+ RETQ killed $al
...
@@ -100,14 +100,14 @@
name: test_movb_impdef_gr16
tracksRegLiveness: true
liveins:
- - { reg: '%edi' }
+ - { reg: '$edi' }
body: |
bb.0 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- ; CHECK: %eax = MOV32rr undef %edi, implicit %dil
- %al = MOV8rr %dil, implicit-def %ax
- RETQ killed %al
+ ; CHECK: $eax = MOV32rr undef $edi, implicit $dil
+ $al = MOV8rr $dil, implicit-def $ax
+ RETQ killed $al
...
@@ -115,14 +115,14 @@
name: test_movw_impdef_gr32
tracksRegLiveness: true
liveins:
- - { reg: '%edi' }
+ - { reg: '$edi' }
body: |
bb.0 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- ; CHECK: %eax = MOV32rr undef %edi, implicit %di
- %ax = MOV16rr %di, implicit-def %eax
- RETQ killed %ax
+ ; CHECK: $eax = MOV32rr undef $edi, implicit $di
+ $ax = MOV16rr $di, implicit-def $eax
+ RETQ killed $ax
...
@@ -130,13 +130,13 @@
name: test_movw_impdef_gr64
tracksRegLiveness: true
liveins:
- - { reg: '%edi' }
+ - { reg: '$edi' }
body: |
bb.0 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- ; CHECK: %eax = MOV32rr undef %edi, implicit %di, implicit-def %rax
- %ax = MOV16rr %di, implicit-def %rax
- RETQ killed %ax
+ ; CHECK: $eax = MOV32rr undef $edi, implicit $di, implicit-def $rax
+ $ax = MOV16rr $di, implicit-def $rax
+ RETQ killed $ax
...
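All of the fixup-bw-copy.mir cases above exercise one rewrite: an 8- or 16-bit register-to-register move is widened to a 32-bit MOV of the containing super-registers so it cannot incur a partial-register stall. The widened form marks the source super-register undef and keeps the sub-register actually read as an implicit use, which is exactly what the CHECK lines spell out. Reduced to its essentials:

    ; byte copy as written ...
    $al = MOV8rr killed $dil
    ; ... and the dependency-breaking form the pass should emit:
    $eax = MOV32rr undef $edi, implicit $dil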
diff --git a/llvm/test/CodeGen/X86/fixup-bw-inst.mir b/llvm/test/CodeGen/X86/fixup-bw-inst.mir
index e26fa72..c5ccaa4 100644
--- a/llvm/test/CodeGen/X86/fixup-bw-inst.mir
+++ b/llvm/test/CodeGen/X86/fixup-bw-inst.mir
@@ -42,20 +42,20 @@
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
-# Verify that "movw (%rax), %ax" is changed to "movzwl (%rax), %rax".
+ - { reg: '$rax' }
+# Verify that "movw ($rax), $ax" is changed to "movzwl ($rax), $eax".
#
# For that to happen, the liveness information after the MOV16rm
-# instruction should be used, not before it because %rax is live
+# instruction should be used, not before it because $rax is live
# before the MOV and is killed by it.
body: |
bb.0:
- liveins: %rax
+ liveins: $rax
- %ax = MOV16rm killed %rax, 1, %noreg, 0, %noreg
- ; CHECK: %eax = MOVZX32rm16 killed %rax
+ $ax = MOV16rm killed $rax, 1, $noreg, 0, $noreg
+ ; CHECK: $eax = MOVZX32rm16 killed $rax
- RETQ %ax
+ RETQ $ax
...
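The comment above is the crux of this test: because the MOV16rm kills $rax, nothing reads any wider part of the destination afterwards, so the 16-bit load may be widened into a zero-extending 32-bit load that writes all of $eax instead of just $ax. That fact is only visible in the liveness state after the instruction, which is why querying liveness before the MOV would wrongly block the transform. Side by side:

    ; 16-bit load, a partial write of $eax:
    $ax = MOV16rm killed $rax, 1, $noreg, 0, $noreg
    ; widened replacement, a full write of $eax:
    $eax = MOVZX32rm16 killed $rax, 1, $noreg, 0, $noreg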
---
@@ -64,19 +64,19 @@
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
+ - { reg: '$rax' }
# Imp-use of any super-register means the register is live before the MOV
body: |
bb.0:
- liveins: %dl, %rbx, %rcx, %r14
+ liveins: $dl, $rbx, $rcx, $r14
- %cl = MOV8rr killed %dl, implicit killed %rcx, implicit-def %rcx
- ; CHECK: %cl = MOV8rr killed %dl, implicit killed %rcx, implicit-def %rcx
+ $cl = MOV8rr killed $dl, implicit killed $rcx, implicit-def $rcx
+ ; CHECK: $cl = MOV8rr killed $dl, implicit killed $rcx, implicit-def $rcx
JMP_1 %bb.1
bb.1:
- liveins: %rcx
+ liveins: $rcx
- RETQ %cl
+ RETQ $cl
...
---
@@ -85,8 +85,8 @@
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
-# After MOV16rm the whole %eax is not *really* live, as can be seen by
+ - { reg: '$rdi' }
+# After MOV16rm the whole $eax is not *really* live, as can be seen by
# missing implicit-uses of it in that MOV. Make sure that MOV is
# transformed into MOVZX.
# See the comment near the original IR on what preceding decisions can
@@ -94,23 +94,23 @@
body: |
bb.0.entry:
successors: %bb.1(0x30000000), %bb.2.if.then(0x50000000)
- liveins: %rdi
+ liveins: $rdi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.1, implicit %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.1, implicit $eflags
bb.2.if.then:
- liveins: %rdi
+ liveins: $rdi
- %ax = MOV16rm killed %rdi, 1, %noreg, 0, %noreg, implicit-def %eax :: (load 2 from %ir.p)
- ; CHECK: %eax = MOVZX32rm16 killed %rdi, 1, %noreg, 0, %noreg, implicit-def %eax :: (load 2 from %ir.p)
- %ax = KILL %ax, implicit killed %eax
- RETQ %ax
+ $ax = MOV16rm killed $rdi, 1, $noreg, 0, $noreg, implicit-def $eax :: (load 2 from %ir.p)
+ ; CHECK: $eax = MOVZX32rm16 killed $rdi, 1, $noreg, 0, $noreg, implicit-def $eax :: (load 2 from %ir.p)
+ $ax = KILL $ax, implicit killed $eax
+ RETQ $ax
bb.1:
- %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
- %ax = KILL %ax, implicit killed %eax
- RETQ %ax
+ $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags
+ $ax = KILL $ax, implicit killed $eax
+ RETQ $ax
...
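This case is legal for a subtler reason: the MOV16rm carries an implicit-def of $eax but no implicit use of it, which is how MIR records that the wider register holds no value anybody reads, and both return paths immediately KILL the result down to $ax. The decisive difference from the blocked cases is that missing implicit use, as in this sketch built from the body above:

    ; partial def; the implicit-def covers the rest of $eax:
    $ax = MOV16rm killed $rdi, 1, $noreg, 0, $noreg, implicit-def $eax
    ; widened form expected by the CHECK line, legal because the
    ; original MOV had no implicit use of $eax:
    $eax = MOVZX32rm16 killed $rdi, 1, $noreg, 0, $noreg, implicit-def $eax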
---
@@ -119,18 +119,18 @@
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%r9d' }
+ - { reg: '$r9d' }
# This code copies r10b into r9b and then uses r9w. We would like to promote
# the copy to a 32-bit copy, but because r9w is used this is not acceptable.
body: |
bb.0.entry:
- liveins: %r9d
+ liveins: $r9d
- %r9b = MOV8rr undef %r10b, implicit-def %r9d, implicit killed %r9d, implicit-def %eflags
- ; CHECK: %r9b = MOV8rr undef %r10b, implicit-def %r9d, implicit killed %r9d, implicit-def %eflags
+ $r9b = MOV8rr undef $r10b, implicit-def $r9d, implicit killed $r9d, implicit-def $eflags
+ ; CHECK: $r9b = MOV8rr undef $r10b, implicit-def $r9d, implicit killed $r9d, implicit-def $eflags
- %ax = OR16rr undef %ax, %r9w, implicit-def %eflags
- RETQ %ax
+ $ax = OR16rr undef $ax, $r9w, implicit-def $eflags
+ RETQ $ax
...
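And this is the negative case: $r9w overlaps both $r9b and bits 8-15 of $r9d, and the OR16rr after the copy still reads $r9w. Promoting the byte copy to a 32-bit MOV32rr would clobber those middle bits, so the pass must leave the MOV8rr untouched, which the CHECK line pins down:

    ; must stay an 8-bit move: $r9w, read below, extends beyond $r9b
    $r9b = MOV8rr undef $r10b, implicit-def $r9d, implicit killed $r9d, implicit-def $eflags
    $ax = OR16rr undef $ax, $r9w, implicit-def $eflags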
---
@@ -139,14 +139,14 @@
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%ch', reg: '%bl' }
+ - { reg: '$ch', reg: '$bl' }
body: |
bb.0:
- liveins: %ch, %bl
+ liveins: $ch, $bl
- %cl = MOV8rr %bl, implicit-def %cx, implicit killed %ch, implicit-def %eflags
- ; CHECK: %cl = MOV8rr %bl, implicit-def %cx, implicit killed %ch, implicit-def %eflags
+ $cl = MOV8rr $bl, implicit-def $cx, implicit killed $ch, implicit-def $eflags
+ ; CHECK: $cl = MOV8rr $bl, implicit-def $cx, implicit killed $ch, implicit-def $eflags
- RETQ %cx
+ RETQ $cx
...
diff --git a/llvm/test/CodeGen/X86/gpr-to-mask.ll b/llvm/test/CodeGen/X86/gpr-to-mask.ll
index ead07ad..aae551b 100644
--- a/llvm/test/CodeGen/X86/gpr-to-mask.ll
+++ b/llvm/test/CodeGen/X86/gpr-to-mask.ll
@@ -260,8 +260,8 @@
define void @test_shl1(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; X86-64-LABEL: test_shl1:
; X86-64: # %bb.0: # %entry
-; X86-64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-64-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X86-64-NEXT: testb $1, %dil
; X86-64-NEXT: je .LBB5_2
; X86-64-NEXT: # %bb.1: # %if
@@ -278,8 +278,8 @@
;
; X86-32-LABEL: test_shl1:
; X86-32: # %bb.0: # %entry
-; X86-32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-32-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-32-NEXT: testb $1, {{[0-9]+}}(%esp)
; X86-32-NEXT: je .LBB5_2
@@ -319,8 +319,8 @@
define void @test_shr1(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; X86-64-LABEL: test_shr1:
; X86-64: # %bb.0: # %entry
-; X86-64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-64-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X86-64-NEXT: testb $1, %dil
; X86-64-NEXT: je .LBB6_2
; X86-64-NEXT: # %bb.1: # %if
@@ -338,8 +338,8 @@
;
; X86-32-LABEL: test_shr1:
; X86-32: # %bb.0: # %entry
-; X86-32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-32-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-32-NEXT: testb $1, {{[0-9]+}}(%esp)
; X86-32-NEXT: je .LBB6_2
@@ -380,8 +380,8 @@
define void @test_shr2(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; X86-64-LABEL: test_shr2:
; X86-64: # %bb.0: # %entry
-; X86-64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-64-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X86-64-NEXT: testb $1, %dil
; X86-64-NEXT: je .LBB7_2
; X86-64-NEXT: # %bb.1: # %if
@@ -398,8 +398,8 @@
;
; X86-32-LABEL: test_shr2:
; X86-32: # %bb.0: # %entry
-; X86-32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-32-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-32-NEXT: testb $1, {{[0-9]+}}(%esp)
; X86-32-NEXT: je .LBB7_2
@@ -439,8 +439,8 @@
define void @test_shl(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; X86-64-LABEL: test_shl:
; X86-64: # %bb.0: # %entry
-; X86-64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-64-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X86-64-NEXT: testb $1, %dil
; X86-64-NEXT: je .LBB8_2
; X86-64-NEXT: # %bb.1: # %if
@@ -457,8 +457,8 @@
;
; X86-32-LABEL: test_shl:
; X86-32: # %bb.0: # %entry
-; X86-32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-32-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-32-NEXT: testb $1, {{[0-9]+}}(%esp)
; X86-32-NEXT: je .LBB8_2
@@ -498,8 +498,8 @@
define void @test_add(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; X86-64-LABEL: test_add:
; X86-64: # %bb.0: # %entry
-; X86-64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-64-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X86-64-NEXT: kmovb (%rsi), %k0
; X86-64-NEXT: kmovb (%rdx), %k1
; X86-64-NEXT: testb $1, %dil
@@ -517,8 +517,8 @@
;
; X86-32-LABEL: test_add:
; X86-32: # %bb.0: # %entry
-; X86-32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; X86-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X86-32-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-32-NEXT: movl {{[0-9]+}}(%esp), %edx
diff --git a/llvm/test/CodeGen/X86/greedy_regalloc_bad_eviction_sequence.ll b/llvm/test/CodeGen/X86/greedy_regalloc_bad_eviction_sequence.ll
index 4e39db5..790bab9 100644
--- a/llvm/test/CodeGen/X86/greedy_regalloc_bad_eviction_sequence.ll
+++ b/llvm/test/CodeGen/X86/greedy_regalloc_bad_eviction_sequence.ll
@@ -5,23 +5,23 @@
; This test is meant to make sure a bad eviction sequence like the one
; described below does not occur
;
; movl %ebp, 8(%esp) # 4-byte Spill
; movl %ecx, %ebp
; movl %ebx, %ecx
; movl %edi, %ebx
; movl %edx, %edi
; cltd
; idivl %esi
; movl %edi, %edx
; movl %ebx, %edi
; movl %ecx, %ebx
; movl %ebp, %ecx
; movl 16(%esp), %ebp # 4-byte Reload
; Make sure we have no redundant copies in the problematic code section
; CHECK-LABEL: name: bar
; CHECK: bb.3.for.body:
-; CHECK: %eax = COPY
+; CHECK: $eax = COPY
; CHECK-NEXT: CDQ
; CHECK-NEXT: IDIV32r
; CHECK-NEXT: ADD32rr
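For context, the assembly quoted above is the bad eviction chain itself: the allocator shuffles five live values through every general-purpose register merely to free $eax and $edx for the divide. The CHECK block asserts the repaired allocation at the MIR level, a single COPY feeding the divide followed immediately by CDQ, IDIV32r and ADD32rr. Roughly the intended shape, as a sketch: the virtual register names %dividend and %divisor are made up, and the implicit operand lists are an approximation of the usual x86 definitions:

    ; one setup copy for the dividend ...
    $eax = COPY %dividend
    CDQ implicit-def $eax, implicit-def $edx, implicit $eax
    ; ... and the divide consumes $edx:$eax directly, with no
    ; register shuffling in between:
    IDIV32r %divisor, implicit-def $eax, implicit-def $edx, implicit-def dead $eflags, implicit $eax, implicit $edx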
diff --git a/llvm/test/CodeGen/X86/half.ll b/llvm/test/CodeGen/X86/half.ll
index b438e84..3bc176f 100644
--- a/llvm/test/CodeGen/X86/half.ll
+++ b/llvm/test/CodeGen/X86/half.ll
@@ -777,7 +777,7 @@
; BWON-F16C-NEXT: callq __truncdfhf2
; BWON-F16C-NEXT: movl %eax, %r15d
; BWON-F16C-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; BWON-F16C-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; BWON-F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; BWON-F16C-NEXT: vzeroupper
; BWON-F16C-NEXT: callq __truncdfhf2
; BWON-F16C-NEXT: movl %eax, %ebp
diff --git a/llvm/test/CodeGen/X86/horizontal-reduce-smax.ll b/llvm/test/CodeGen/X86/horizontal-reduce-smax.ll
index fa92158..11787c8 100644
--- a/llvm/test/CodeGen/X86/horizontal-reduce-smax.ll
+++ b/llvm/test/CodeGen/X86/horizontal-reduce-smax.ll
@@ -206,7 +206,7 @@
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i16:
@@ -216,7 +216,7 @@
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v8i16:
@@ -226,7 +226,7 @@
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %xmm0, %eax
-; X86-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i16:
@@ -239,7 +239,7 @@
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i16:
@@ -249,7 +249,7 @@
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v8i16:
@@ -259,7 +259,7 @@
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vmovd %xmm0, %eax
-; X64-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp sgt <8 x i16> %a0, %1
@@ -304,7 +304,7 @@
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm2, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i8:
@@ -317,7 +317,7 @@
; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v16i8:
@@ -329,7 +329,7 @@
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i8:
@@ -361,7 +361,7 @@
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm2, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i8:
@@ -374,7 +374,7 @@
; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v16i8:
@@ -386,7 +386,7 @@
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp sgt <16 x i8> %a0, %1
@@ -736,7 +736,7 @@
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i16:
@@ -747,7 +747,7 @@
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i16:
@@ -759,7 +759,7 @@
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -772,7 +772,7 @@
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -787,7 +787,7 @@
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i16:
@@ -798,7 +798,7 @@
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i16:
@@ -810,7 +810,7 @@
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -823,7 +823,7 @@
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -836,7 +836,7 @@
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -890,7 +890,7 @@
; X86-SSE2-NEXT: pandn %xmm0, %xmm2
; X86-SSE2-NEXT: por %xmm1, %xmm2
; X86-SSE2-NEXT: movd %xmm2, %eax
-; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i8:
@@ -904,7 +904,7 @@
; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i8:
@@ -918,7 +918,7 @@
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -933,7 +933,7 @@
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -971,7 +971,7 @@
; X64-SSE2-NEXT: pandn %xmm0, %xmm2
; X64-SSE2-NEXT: por %xmm1, %xmm2
; X64-SSE2-NEXT: movd %xmm2, %eax
-; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i8:
@@ -985,7 +985,7 @@
; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i8:
@@ -999,7 +999,7 @@
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1014,7 +1014,7 @@
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1029,7 +1029,7 @@
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1526,7 +1526,7 @@
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i16:
@@ -1539,7 +1539,7 @@
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i16:
@@ -1554,7 +1554,7 @@
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1568,7 +1568,7 @@
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1585,7 +1585,7 @@
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i16:
@@ -1598,7 +1598,7 @@
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i16:
@@ -1613,7 +1613,7 @@
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1627,7 +1627,7 @@
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1642,7 +1642,7 @@
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1709,7 +1709,7 @@
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm2, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v64i8:
@@ -1725,7 +1725,7 @@
; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v64i8:
@@ -1742,7 +1742,7 @@
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1758,7 +1758,7 @@
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1806,7 +1806,7 @@
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm2, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v64i8:
@@ -1822,7 +1822,7 @@
; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v64i8:
@@ -1839,7 +1839,7 @@
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1855,7 +1855,7 @@
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1872,7 +1872,7 @@
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
diff --git a/llvm/test/CodeGen/X86/horizontal-reduce-smin.ll b/llvm/test/CodeGen/X86/horizontal-reduce-smin.ll
index fa5828a..72df0f7 100644
--- a/llvm/test/CodeGen/X86/horizontal-reduce-smin.ll
+++ b/llvm/test/CodeGen/X86/horizontal-reduce-smin.ll
@@ -208,7 +208,7 @@
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i16:
@@ -218,7 +218,7 @@
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v8i16:
@@ -228,7 +228,7 @@
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %xmm0, %eax
-; X86-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i16:
@@ -241,7 +241,7 @@
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i16:
@@ -251,7 +251,7 @@
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v8i16:
@@ -261,7 +261,7 @@
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vmovd %xmm0, %eax
-; X64-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp slt <8 x i16> %a0, %1
@@ -306,7 +306,7 @@
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm2, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i8:
@@ -319,7 +319,7 @@
; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v16i8:
@@ -331,7 +331,7 @@
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i8:
@@ -363,7 +363,7 @@
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm2, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i8:
@@ -376,7 +376,7 @@
; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v16i8:
@@ -388,7 +388,7 @@
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp slt <16 x i8> %a0, %1
@@ -740,7 +740,7 @@
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i16:
@@ -751,7 +751,7 @@
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i16:
@@ -763,7 +763,7 @@
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -776,7 +776,7 @@
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -791,7 +791,7 @@
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i16:
@@ -802,7 +802,7 @@
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i16:
@@ -814,7 +814,7 @@
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -827,7 +827,7 @@
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -840,7 +840,7 @@
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -894,7 +894,7 @@
; X86-SSE2-NEXT: pandn %xmm0, %xmm2
; X86-SSE2-NEXT: por %xmm1, %xmm2
; X86-SSE2-NEXT: movd %xmm2, %eax
-; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i8:
@@ -908,7 +908,7 @@
; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i8:
@@ -922,7 +922,7 @@
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -937,7 +937,7 @@
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -975,7 +975,7 @@
; X64-SSE2-NEXT: pandn %xmm0, %xmm2
; X64-SSE2-NEXT: por %xmm1, %xmm2
; X64-SSE2-NEXT: movd %xmm2, %eax
-; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i8:
@@ -989,7 +989,7 @@
; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i8:
@@ -1003,7 +1003,7 @@
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1018,7 +1018,7 @@
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1033,7 +1033,7 @@
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1528,7 +1528,7 @@
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i16:
@@ -1541,7 +1541,7 @@
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i16:
@@ -1556,7 +1556,7 @@
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1570,7 +1570,7 @@
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1587,7 +1587,7 @@
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i16:
@@ -1600,7 +1600,7 @@
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i16:
@@ -1615,7 +1615,7 @@
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1629,7 +1629,7 @@
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1644,7 +1644,7 @@
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1711,7 +1711,7 @@
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm2, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v64i8:
@@ -1727,7 +1727,7 @@
; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v64i8:
@@ -1744,7 +1744,7 @@
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1760,7 +1760,7 @@
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1808,7 +1808,7 @@
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm2, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v64i8:
@@ -1824,7 +1824,7 @@
; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v64i8:
@@ -1841,7 +1841,7 @@
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1857,7 +1857,7 @@
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1874,7 +1874,7 @@
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
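The pattern above repeats across every hunk in these test files: only MIR-printed operands switch to the `$` sigil (the `# kill:` and `DEBUG_VALUE:` annotations, and whole `.mir` tests), while AT&T assembly operands such as `%eax` and `%xmm0` keep `%`. A minimal sketch of that rewrite rule in Python, assuming a hand-written helper and a deliberately abbreviated x86 register list (this is not the script actually used for this commit):

```python
#!/usr/bin/env python3
# Hypothetical sketch (NOT the script used for this commit): flip the
# sigil on known physical registers in MIR-printed text, leaving AT&T
# assembly operands, IR values (%ir.x), blocks (%bb.1) and numbered
# vregs (%0, %1, ...) untouched.
import re
import sys

# Deliberately abbreviated; the real change covers every target's
# physical registers.
PHYS_REGS = {'al', 'ah', 'ax', 'eax', 'rax', 'cl', 'cx', 'ecx', 'rcx',
             'dl', 'dx', 'edx', 'rdx', 'sil', 'si', 'esi', 'rsi',
             'dil', 'di', 'edi', 'rdi', 'bl', 'bx', 'ebx', 'rbx',
             'rbp', 'rsp', 'eflags', 'noreg'}

SIGIL = re.compile(r'%([A-Za-z][A-Za-z0-9]*)\b')

def flip(m):
    name = m.group(1)
    return '$' + name if name.lower() in PHYS_REGS else m.group(0)

def convert_line(path, line):
    if path.endswith('.mir'):
        # Whole-file MIR tests: every physical-register operand changes.
        return SIGIL.sub(flip, line)
    # In .ll tests only the MIR-printed annotations change; assembly
    # operands like '%eax' in 'movd %xmm0, %eax' keep their '%'.
    m = re.search(r'kill:|DEBUG_VALUE:', line)
    if m:
        return line[:m.end()] + SIGIL.sub(flip, line[m.end():])
    return line

if __name__ == '__main__':
    path = sys.argv[1]
    for line in open(path):
        sys.stdout.write(convert_line(path, line))
```

Because only whitelisted physical-register names are flipped, spellings like `%0`, `%ir.x`, and `%bb.1` pass through unchanged, which matches what the hunks in this diff do.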
diff --git a/llvm/test/CodeGen/X86/horizontal-reduce-umax.ll b/llvm/test/CodeGen/X86/horizontal-reduce-umax.ll
index 9364396..65f87f2 100644
--- a/llvm/test/CodeGen/X86/horizontal-reduce-umax.ll
+++ b/llvm/test/CodeGen/X86/horizontal-reduce-umax.ll
@@ -254,7 +254,7 @@
; X86-SSE2-NEXT: pandn %xmm0, %xmm3
; X86-SSE2-NEXT: por %xmm2, %xmm3
; X86-SSE2-NEXT: movd %xmm3, %eax
-; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i16:
@@ -264,7 +264,7 @@
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v8i16:
@@ -274,7 +274,7 @@
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %xmm0, %eax
-; X86-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i16:
@@ -308,7 +308,7 @@
; X64-SSE2-NEXT: pandn %xmm0, %xmm3
; X64-SSE2-NEXT: por %xmm2, %xmm3
; X64-SSE2-NEXT: movd %xmm3, %eax
-; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i16:
@@ -318,7 +318,7 @@
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v8i16:
@@ -328,7 +328,7 @@
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v8i16:
@@ -338,7 +338,7 @@
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v8i16:
@@ -347,7 +347,7 @@
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX512-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp ugt <8 x i16> %a0, %1
@@ -376,7 +376,7 @@
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i8:
@@ -389,7 +389,7 @@
; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v16i8:
@@ -401,7 +401,7 @@
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i8:
@@ -417,7 +417,7 @@
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i8:
@@ -430,7 +430,7 @@
; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i8:
@@ -442,7 +442,7 @@
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_reduce_v16i8:
@@ -454,7 +454,7 @@
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: test_reduce_v16i8:
@@ -465,7 +465,7 @@
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX512-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp ugt <16 x i8> %a0, %1
@@ -895,7 +895,7 @@
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm3, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i16:
@@ -906,7 +906,7 @@
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i16:
@@ -918,7 +918,7 @@
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -931,7 +931,7 @@
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -974,7 +974,7 @@
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm3, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i16:
@@ -985,7 +985,7 @@
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i16:
@@ -997,7 +997,7 @@
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1010,7 +1010,7 @@
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1022,7 +1022,7 @@
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1056,7 +1056,7 @@
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i8:
@@ -1070,7 +1070,7 @@
; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i8:
@@ -1084,7 +1084,7 @@
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1099,7 +1099,7 @@
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1117,7 +1117,7 @@
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i8:
@@ -1131,7 +1131,7 @@
; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i8:
@@ -1145,7 +1145,7 @@
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1160,7 +1160,7 @@
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1174,7 +1174,7 @@
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1801,7 +1801,7 @@
; X86-SSE2-NEXT: pandn %xmm0, %xmm2
; X86-SSE2-NEXT: por %xmm1, %xmm2
; X86-SSE2-NEXT: movd %xmm2, %eax
-; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i16:
@@ -1814,7 +1814,7 @@
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i16:
@@ -1829,7 +1829,7 @@
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1843,7 +1843,7 @@
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1902,7 +1902,7 @@
; X64-SSE2-NEXT: pandn %xmm0, %xmm2
; X64-SSE2-NEXT: por %xmm1, %xmm2
; X64-SSE2-NEXT: movd %xmm2, %eax
-; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i16:
@@ -1915,7 +1915,7 @@
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i16:
@@ -1930,7 +1930,7 @@
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1944,7 +1944,7 @@
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1958,7 +1958,7 @@
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1997,7 +1997,7 @@
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v64i8:
@@ -2013,7 +2013,7 @@
; X86-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v64i8:
@@ -2030,7 +2030,7 @@
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -2046,7 +2046,7 @@
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -2066,7 +2066,7 @@
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v64i8:
@@ -2082,7 +2082,7 @@
; X64-SSE42-NEXT: phminposuw %xmm2, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v64i8:
@@ -2099,7 +2099,7 @@
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -2115,7 +2115,7 @@
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -2131,7 +2131,7 @@
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
diff --git a/llvm/test/CodeGen/X86/horizontal-reduce-umin.ll b/llvm/test/CodeGen/X86/horizontal-reduce-umin.ll
index 2a37d17..23e680c 100644
--- a/llvm/test/CodeGen/X86/horizontal-reduce-umin.ll
+++ b/llvm/test/CodeGen/X86/horizontal-reduce-umin.ll
@@ -256,21 +256,21 @@
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm3, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i16:
; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v8i16:
; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %xmm0, %eax
-; X86-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i16:
@@ -304,21 +304,21 @@
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm3, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i16:
; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v8i16:
; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vmovd %xmm0, %eax
-; X64-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp ult <8 x i16> %a0, %1
@@ -347,7 +347,7 @@
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pminub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i8:
@@ -357,7 +357,7 @@
; X86-SSE42-NEXT: pminub %xmm0, %xmm1
; X86-SSE42-NEXT: phminposuw %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v16i8:
@@ -366,7 +366,7 @@
; X86-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i8:
@@ -382,7 +382,7 @@
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pminub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i8:
@@ -392,7 +392,7 @@
; X64-SSE42-NEXT: pminub %xmm0, %xmm1
; X64-SSE42-NEXT: phminposuw %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v16i8:
@@ -401,7 +401,7 @@
; X64-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp ult <16 x i8> %a0, %1
@@ -835,7 +835,7 @@
; X86-SSE2-NEXT: pandn %xmm0, %xmm2
; X86-SSE2-NEXT: por %xmm4, %xmm2
; X86-SSE2-NEXT: movd %xmm2, %eax
-; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i16:
@@ -843,7 +843,7 @@
; X86-SSE42-NEXT: pminuw %xmm1, %xmm0
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i16:
@@ -852,7 +852,7 @@
; X86-AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -862,7 +862,7 @@
; X86-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -905,7 +905,7 @@
; X64-SSE2-NEXT: pandn %xmm0, %xmm2
; X64-SSE2-NEXT: por %xmm4, %xmm2
; X64-SSE2-NEXT: movd %xmm2, %eax
-; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i16:
@@ -913,7 +913,7 @@
; X64-SSE42-NEXT: pminuw %xmm1, %xmm0
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i16:
@@ -922,7 +922,7 @@
; X64-AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -932,7 +932,7 @@
; X64-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -942,7 +942,7 @@
; X64-AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -976,7 +976,7 @@
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pminub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i8:
@@ -987,7 +987,7 @@
; X86-SSE42-NEXT: pminub %xmm0, %xmm1
; X86-SSE42-NEXT: phminposuw %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i8:
@@ -998,7 +998,7 @@
; X86-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1010,7 +1010,7 @@
; X86-AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1028,7 +1028,7 @@
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pminub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i8:
@@ -1039,7 +1039,7 @@
; X64-SSE42-NEXT: pminub %xmm0, %xmm1
; X64-SSE42-NEXT: phminposuw %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i8:
@@ -1050,7 +1050,7 @@
; X64-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1062,7 +1062,7 @@
; X64-AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1074,7 +1074,7 @@
; X64-AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1699,7 +1699,7 @@
; X86-SSE2-NEXT: pandn %xmm0, %xmm4
; X86-SSE2-NEXT: por %xmm2, %xmm4
; X86-SSE2-NEXT: movd %xmm4, %eax
-; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i16:
@@ -1709,7 +1709,7 @@
; X86-SSE42-NEXT: pminuw %xmm1, %xmm0
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i16:
@@ -1721,7 +1721,7 @@
; X86-AVX1-NEXT: vpminuw %xmm2, %xmm0, %xmm0
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1732,7 +1732,7 @@
; X86-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X86-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1791,7 +1791,7 @@
; X64-SSE2-NEXT: pandn %xmm0, %xmm4
; X64-SSE2-NEXT: por %xmm2, %xmm4
; X64-SSE2-NEXT: movd %xmm4, %eax
-; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i16:
@@ -1801,7 +1801,7 @@
; X64-SSE42-NEXT: pminuw %xmm1, %xmm0
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-SSE42-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i16:
@@ -1813,7 +1813,7 @@
; X64-AVX1-NEXT: vpminuw %xmm2, %xmm0, %xmm0
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1824,7 +1824,7 @@
; X64-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX2-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1836,7 +1836,7 @@
; X64-AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1875,7 +1875,7 @@
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pminub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v64i8:
@@ -1888,7 +1888,7 @@
; X86-SSE42-NEXT: pminub %xmm0, %xmm1
; X86-SSE42-NEXT: phminposuw %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X86-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v64i8:
@@ -1902,7 +1902,7 @@
; X86-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1915,7 +1915,7 @@
; X86-AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X86-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1935,7 +1935,7 @@
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pminub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE2-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v64i8:
@@ -1948,7 +1948,7 @@
; X64-SSE42-NEXT: pminub %xmm0, %xmm1
; X64-SSE42-NEXT: phminposuw %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
+; X64-SSE42-NEXT: ## kill: def $al killed $al killed $eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v64i8:
@@ -1962,7 +1962,7 @@
; X64-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX1-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1975,7 +1975,7 @@
; X64-AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX2-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1989,7 +1989,7 @@
; X64-AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
+; X64-AVX512-NEXT: ## kill: def $al killed $al killed $eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
diff --git a/llvm/test/CodeGen/X86/iabs.ll b/llvm/test/CodeGen/X86/iabs.ll
index 95b0328..0b2e397 100644
--- a/llvm/test/CodeGen/X86/iabs.ll
+++ b/llvm/test/CodeGen/X86/iabs.ll
@@ -41,7 +41,7 @@
; X86-NO-CMOV-NEXT: sarw $15, %cx
; X86-NO-CMOV-NEXT: addl %ecx, %eax
; X86-NO-CMOV-NEXT: xorl %ecx, %eax
-; X86-NO-CMOV-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NO-CMOV-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NO-CMOV-NEXT: retl
;
; X86-CMOV-LABEL: test_i16:
diff --git a/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll b/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll
index 0bd84bb..10986c0 100644
--- a/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll
+++ b/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll
@@ -116,7 +116,7 @@
; X64-NEXT: movzwl 4(%rdi), %eax
; X64-NEXT: movzbl 6(%rdi), %ecx
; X64-NEXT: movb %cl, 6(%rdi)
-; X64-NEXT: # kill: def %ecx killed %ecx killed %rcx def %rcx
+; X64-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
; X64-NEXT: shll $16, %ecx
; X64-NEXT: orl %eax, %ecx
; X64-NEXT: shlq $32, %rcx
@@ -148,7 +148,7 @@
; X64-NEXT: movzwl 4(%rdi), %eax
; X64-NEXT: movzbl 6(%rdi), %ecx
; X64-NEXT: movb %cl, 6(%rdi)
-; X64-NEXT: # kill: def %ecx killed %ecx killed %rcx def %rcx
+; X64-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
; X64-NEXT: shll $16, %ecx
; X64-NEXT: orl %eax, %ecx
; X64-NEXT: shlq $32, %rcx
@@ -186,7 +186,7 @@
; X64-NEXT: movzwl 4(%rdi), %ecx
; X64-NEXT: movzbl 6(%rdi), %edx
; X64-NEXT: movb %dl, 6(%rdi)
-; X64-NEXT: # kill: def %edx killed %edx killed %rdx def %rdx
+; X64-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx
; X64-NEXT: shll $16, %edx
; X64-NEXT: orl %ecx, %edx
; X64-NEXT: shlq $32, %rdx
diff --git a/llvm/test/CodeGen/X86/implicit-null-checks.mir b/llvm/test/CodeGen/X86/implicit-null-checks.mir
index 31361ac..5704d0c 100644
--- a/llvm/test/CodeGen/X86/implicit-null-checks.mir
+++ b/llvm/test/CodeGen/X86/implicit-null-checks.mir
@@ -23,7 +23,7 @@
}
;; Negative test. The regalloc is such that we cannot hoist the
- ;; instruction materializing 2200000 into %eax
+ ;; instruction materializing 2200000 into $eax
define i32 @imp_null_check_with_bitwise_op_1(i32* %x, i32 %val, i32* %ptr) {
entry:
br i1 undef, label %is_null, label %not_null, !make.implicit !0
@@ -387,39 +387,39 @@
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%esi' }
+ - { reg: '$rdi' }
+ - { reg: '$esi' }
# CHECK: bb.0.entry:
-# CHECK: %eax = MOV32ri 2200000
-# CHECK-NEXT: %eax = FAULTING_OP 1, %bb.3, {{[0-9]+}}, %eax, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags :: (load 4 from %ir.x)
+# CHECK: $eax = MOV32ri 2200000
+# CHECK-NEXT: $eax = FAULTING_OP 1, %bb.3, {{[0-9]+}}, $eax, $rdi, 1, $noreg, 0, $noreg, implicit-def $eflags :: (load 4 from %ir.x)
# CHECK-NEXT: JMP_1 %bb.1
body: |
bb.0.entry:
- liveins: %esi, %rdi
+ liveins: $esi, $rdi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.3, implicit %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.3, implicit $eflags
bb.1.not_null:
- liveins: %esi, %rdi
+ liveins: $esi, $rdi
- %eax = MOV32ri 2200000
- %eax = AND32rm killed %eax, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
- CMP32rr killed %eax, killed %esi, implicit-def %eflags
- JE_1 %bb.4, implicit %eflags
+ $eax = MOV32ri 2200000
+ $eax = AND32rm killed $eax, killed $rdi, 1, $noreg, 0, $noreg, implicit-def dead $eflags :: (load 4 from %ir.x)
+ CMP32rr killed $eax, killed $esi, implicit-def $eflags
+ JE_1 %bb.4, implicit $eflags
bb.2.ret_200:
- %eax = MOV32ri 200
- RETQ %eax
+ $eax = MOV32ri 200
+ RETQ $eax
bb.3.is_null:
- %eax = MOV32ri 42
- RETQ %eax
+ $eax = MOV32ri 42
+ RETQ $eax
bb.4.ret_100:
- %eax = MOV32ri 100
- RETQ %eax
+ $eax = MOV32ri 100
+ RETQ $eax
...
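For context on the CHECK lines above: the pass under test folds the explicit null compare into the load itself, so the `TEST64rr`/`JE_1` pair disappears and the load becomes a `FAULTING_OP` whose fault target is the null block. A rough Python analogy of that before/after shape (illustration only, not compiler output):

```python
# Illustration only: what the implicit-null-check transform tested
# above does, expressed as ordinary control flow.
def with_explicit_check(p):
    if p is None:        # TEST64rr $rdi, $rdi + JE_1 to the null block
        return 42
    return p[0] + 200    # the load guarded by the explicit test

def with_implicit_check(p):
    try:
        return p[0] + 200   # FAULTING_OP: the load itself may fault
    except TypeError:       # the fault handler stands in for bb.is_null
        return 42

assert with_explicit_check(None) == with_implicit_check(None) == 42
assert with_explicit_check([1]) == with_implicit_check([1]) == 201
```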
---
@@ -427,42 +427,42 @@
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%esi' }
- - { reg: '%rdx' }
+ - { reg: '$rdi' }
+ - { reg: '$esi' }
+ - { reg: '$rdx' }
# CHECK: bb.0.entry:
-# CHECK: %eax = MOV32rm killed %rdx, 1, %noreg, 0, %noreg :: (volatile load 4 from %ir.ptr)
-# CHECK-NEXT: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.3, implicit %eflags
+# CHECK: $eax = MOV32rm killed $rdx, 1, $noreg, 0, $noreg :: (volatile load 4 from %ir.ptr)
+# CHECK-NEXT: TEST64rr $rdi, $rdi, implicit-def $eflags
+# CHECK-NEXT: JE_1 %bb.3, implicit $eflags
body: |
bb.0.entry:
- liveins: %esi, %rdi, %rdx
+ liveins: $esi, $rdi, $rdx
- %eax = MOV32rm killed %rdx, 1, %noreg, 0, %noreg :: (volatile load 4 from %ir.ptr)
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.3, implicit %eflags
+ $eax = MOV32rm killed $rdx, 1, $noreg, 0, $noreg :: (volatile load 4 from %ir.ptr)
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.3, implicit $eflags
bb.1.not_null:
- liveins: %esi, %rdi
+ liveins: $esi, $rdi
- %eax = MOV32ri 2200000
- %eax = AND32rm killed %eax, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
- CMP32rr killed %eax, killed %esi, implicit-def %eflags
- JE_1 %bb.4, implicit %eflags
+ $eax = MOV32ri 2200000
+ $eax = AND32rm killed $eax, killed $rdi, 1, $noreg, 0, $noreg, implicit-def dead $eflags :: (load 4 from %ir.x)
+ CMP32rr killed $eax, killed $esi, implicit-def $eflags
+ JE_1 %bb.4, implicit $eflags
bb.2.ret_200:
- %eax = MOV32ri 200
+ $eax = MOV32ri 200
bb.3.is_null:
- liveins: %eax, %ah, %al, %ax, %bh, %bl, %bp, %bpl, %bx, %eax, %ebp, %ebx, %rax, %rbp, %rbx, %r12, %r13, %r14, %r15, %r12b, %r13b, %r14b, %r15b, %r12d, %r13d, %r14d, %r15d, %r12w, %r13w, %r14w, %r15w
+ liveins: $eax, $ah, $al, $ax, $bh, $bl, $bp, $bpl, $bx, $eax, $ebp, $ebx, $rax, $rbp, $rbx, $r12, $r13, $r14, $r15, $r12b, $r13b, $r14b, $r15b, $r12d, $r13d, $r14d, $r15d, $r12w, $r13w, $r14w, $r15w
- RETQ %eax
+ RETQ $eax
bb.4.ret_100:
- %eax = MOV32ri 100
- RETQ %eax
+ $eax = MOV32ri 100
+ RETQ $eax
...
---
@@ -471,39 +471,39 @@
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%esi' }
+ - { reg: '$rdi' }
+ - { reg: '$esi' }
# CHECK: bb.0.entry:
-# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.3, implicit %eflags
+# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags
+# CHECK-NEXT: JE_1 %bb.3, implicit $eflags
body: |
bb.0.entry:
- liveins: %esi, %rdi
+ liveins: $esi, $rdi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.3, implicit %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.3, implicit $eflags
bb.1.not_null:
- liveins: %esi, %rdi
+ liveins: $esi, $rdi
- %eax = MOV32ri 2200000
- %eax = ADD32ri killed %eax, 100, implicit-def dead %eflags
- %eax = AND32rm killed %eax, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
- CMP32rr killed %eax, killed %esi, implicit-def %eflags
- JE_1 %bb.4, implicit %eflags
+ $eax = MOV32ri 2200000
+ $eax = ADD32ri killed $eax, 100, implicit-def dead $eflags
+ $eax = AND32rm killed $eax, killed $rdi, 1, $noreg, 0, $noreg, implicit-def dead $eflags :: (load 4 from %ir.x)
+ CMP32rr killed $eax, killed $esi, implicit-def $eflags
+ JE_1 %bb.4, implicit $eflags
bb.2.ret_200:
- %eax = MOV32ri 200
- RETQ %eax
+ $eax = MOV32ri 200
+ RETQ $eax
bb.3.is_null:
- %eax = MOV32ri 42
- RETQ %eax
+ $eax = MOV32ri 42
+ RETQ $eax
bb.4.ret_100:
- %eax = MOV32ri 100
- RETQ %eax
+ $eax = MOV32ri 100
+ RETQ $eax
...
---
@@ -512,38 +512,38 @@
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
# CHECK: bb.0.entry:
-# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.3, implicit %eflags
+# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags
+# CHECK-NEXT: JE_1 %bb.3, implicit $eflags
body: |
bb.0.entry:
- liveins: %rsi, %rdi
+ liveins: $rsi, $rdi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.3, implicit %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.3, implicit $eflags
bb.1.not_null:
- liveins: %rsi, %rdi
+ liveins: $rsi, $rdi
- %rdi = MOV64ri 5000
- %rdi = AND64rm killed %rdi, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
- CMP64rr killed %rdi, killed %rsi, implicit-def %eflags
- JE_1 %bb.4, implicit %eflags
+ $rdi = MOV64ri 5000
+ $rdi = AND64rm killed $rdi, killed $rdi, 1, $noreg, 0, $noreg, implicit-def dead $eflags :: (load 4 from %ir.x)
+ CMP64rr killed $rdi, killed $rsi, implicit-def $eflags
+ JE_1 %bb.4, implicit $eflags
bb.2.ret_200:
- %eax = MOV32ri 200
- RETQ %eax
+ $eax = MOV32ri 200
+ RETQ $eax
bb.3.is_null:
- %eax = MOV32ri 42
- RETQ %eax
+ $eax = MOV32ri 42
+ RETQ $eax
bb.4.ret_100:
- %eax = MOV32ri 100
- RETQ %eax
+ $eax = MOV32ri 100
+ RETQ $eax
...
---
@@ -552,39 +552,39 @@
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
# CHECK: bb.0.entry:
-# CHECK: %rbx = MOV64rr %rdx
-# CHECK-NEXT: %rbx = FAULTING_OP 1, %bb.3, {{[0-9]+}}, %rbx, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags :: (load 4 from %ir.x)
+# CHECK: $rbx = MOV64rr $rdx
+# CHECK-NEXT: $rbx = FAULTING_OP 1, %bb.3, {{[0-9]+}}, $rbx, $rdi, 1, $noreg, 0, $noreg, implicit-def $eflags :: (load 4 from %ir.x)
body: |
bb.0.entry:
- liveins: %rsi, %rdi, %rdx
+ liveins: $rsi, $rdi, $rdx
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.3, implicit %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.3, implicit $eflags
bb.1.not_null:
- liveins: %rsi, %rdi, %rdx
+ liveins: $rsi, $rdi, $rdx
- %rbx = MOV64rr %rdx
- %rbx = AND64rm killed %rbx, killed %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.x)
- %rdx = MOV64ri 0
- CMP64rr killed %rbx, killed %rsi, implicit-def %eflags
- JE_1 %bb.4, implicit %eflags
+ $rbx = MOV64rr $rdx
+ $rbx = AND64rm killed $rbx, killed $rdi, 1, $noreg, 0, $noreg, implicit-def dead $eflags :: (load 4 from %ir.x)
+ $rdx = MOV64ri 0
+ CMP64rr killed $rbx, killed $rsi, implicit-def $eflags
+ JE_1 %bb.4, implicit $eflags
bb.2.ret_200:
- %eax = MOV32ri 200
- RETQ %eax
+ $eax = MOV32ri 200
+ RETQ $eax
bb.3.is_null:
- %eax = MOV32ri 42
- RETQ %eax
+ $eax = MOV32ri 42
+ RETQ $eax
bb.4.ret_100:
- %eax = MOV32ri 100
- RETQ %eax
+ $eax = MOV32ri 100
+ RETQ $eax
...
---
@@ -593,38 +593,38 @@
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
-calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx',
- '%rbp', '%rbx', '%r12', '%r13', '%r14', '%r15',
- '%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d',
- '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ]
+ - { reg: '$rdi' }
+calleeSavedRegisters: [ '$bh', '$bl', '$bp', '$bpl', '$bx', '$ebp', '$ebx',
+ '$rbp', '$rbx', '$r12', '$r13', '$r14', '$r15',
+ '$r12b', '$r13b', '$r14b', '$r15b', '$r12d', '$r13d',
+ '$r14d', '$r15d', '$r12w', '$r13w', '$r14w', '$r15w' ]
# CHECK: body:
# CHECK-NOT: FAULTING_OP
# CHECK: bb.1.stay:
# CHECK: CALL64pcrel32
body: |
bb.0.entry:
- liveins: %rdi, %rbx
+ liveins: $rdi, $rbx
- frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
+ frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
CFI_INSTRUCTION def_cfa_offset 16
- CFI_INSTRUCTION offset %rbx, -16
- %rbx = MOV64rr %rdi
- TEST64rr %rbx, %rbx, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ CFI_INSTRUCTION offset $rbx, -16
+ $rbx = MOV64rr $rdi
+ TEST64rr $rbx, $rbx, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.stay:
- liveins: %rbx
+ liveins: $rbx
- CALL64pcrel32 @f, csr_64, implicit %rsp, implicit-def %rsp
- %eax = MOV32rm killed %rbx, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
- %rbx = POP64r implicit-def %rsp, implicit %rsp
- RETQ %eax
+ CALL64pcrel32 @f, csr_64, implicit $rsp, implicit-def $rsp
+ $eax = MOV32rm killed $rbx, 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr)
+ $rbx = POP64r implicit-def $rsp, implicit $rsp
+ RETQ $eax
bb.2.leave:
- %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
- %rbx = POP64r implicit-def %rsp, implicit %rsp
- RETQ %eax
+ $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags
+ $rbx = POP64r implicit-def $rsp, implicit $rsp
+ RETQ $eax
...
---
@@ -636,154 +636,154 @@
# Make sure that the BEXTR32rm instruction below is not used to emit
# an implicit null check -- hoisting it will require hoisting the move
-# to %esi and we cannot do that without clobbering the use of %rsi in
+# to $esi and we cannot do that without clobbering the use of $rsi in
# the first instruction in bb.1.not_null.
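# (For context: $esi is the low 32 bits of $rsi, so once `$esi = MOV32ri 3076`
# moves into bb.0.entry it overwrites the pointer that
# `$rcx = MOV64rm killed $rsi, ...` in bb.1.not_null still needs to load from.)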
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- %rcx = MOV64rm killed %rsi, 1, %noreg, 0, %noreg :: (load 8 from %ir.ptr2)
- %esi = MOV32ri 3076
- %eax = BEXTR32rm killed %rdi, 1, %noreg, 0, %noreg, killed %esi, implicit-def dead %eflags :: (load 4 from %ir.ptr)
- %eax = ADD32rm killed %eax, killed %rcx, 1, %noreg, 0, %noreg, implicit-def dead %eflags :: (load 4 from %ir.val)
- RETQ %eax
+ $rcx = MOV64rm killed $rsi, 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr2)
+ $esi = MOV32ri 3076
+ $eax = BEXTR32rm killed $rdi, 1, $noreg, 0, $noreg, killed $esi, implicit-def dead $eflags :: (load 4 from %ir.ptr)
+ $eax = ADD32rm killed $eax, killed $rcx, 1, $noreg, 0, $noreg, implicit-def dead $eflags :: (load 4 from %ir.val)
+ RETQ $eax
bb.2.is_null:
- %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
- RETQ %eax
+ $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags
+ RETQ $eax
...
---
name: use_alternate_load_op
# CHECK-LABEL: name: use_alternate_load_op
# CHECK: bb.0.entry:
-# CHECK: %rax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg
+# CHECK: $rax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 0, $noreg
# CHECK-NEXT: JMP_1 %bb.1
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- %rcx = MOV64rm killed %rsi, 1, %noreg, 0, %noreg
- %rcx = AND64rm killed %rcx, %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags
- %rax = MOV64rm killed %rdi, 1, %noreg, 0, %noreg
- RETQ %eax
+ $rcx = MOV64rm killed $rsi, 1, $noreg, 0, $noreg
+ $rcx = AND64rm killed $rcx, $rdi, 1, $noreg, 0, $noreg, implicit-def dead $eflags
+ $rax = MOV64rm killed $rdi, 1, $noreg, 0, $noreg
+ RETQ $eax
bb.2.is_null:
- %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
- RETQ %eax
+ $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags
+ RETQ $eax
...
---
name: imp_null_check_gep_load_with_use_dep
# CHECK-LABEL: name: imp_null_check_gep_load_with_use_dep
# CHECK: bb.0.entry:
-# CHECK: %eax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, implicit-def %rax :: (load 4 from %ir.x)
+# CHECK: $eax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 0, $noreg, implicit-def $rax :: (load 4 from %ir.x)
# CHECK-NEXT: JMP_1 %bb.1
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rsi, %rdi
+ liveins: $rsi, $rdi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.1, implicit %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.1, implicit $eflags
bb.2.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- %rsi = ADD64rr %rsi, %rdi, implicit-def dead %eflags
- %eax = MOV32rm killed %rdi, 1, %noreg, 0, %noreg, implicit-def %rax :: (load 4 from %ir.x)
- %eax = LEA64_32r killed %rax, 1, killed %rsi, 4, %noreg
- RETQ %eax
+ $rsi = ADD64rr $rsi, $rdi, implicit-def dead $eflags
+ $eax = MOV32rm killed $rdi, 1, $noreg, 0, $noreg, implicit-def $rax :: (load 4 from %ir.x)
+ $eax = LEA64_32r killed $rax, 1, killed $rsi, 4, $noreg
+ RETQ $eax
bb.1.is_null:
- %eax = MOV32ri 42
- RETQ %eax
+ $eax = MOV32ri 42
+ RETQ $eax
...
---
name: imp_null_check_load_with_base_sep
# CHECK-LABEL: name: imp_null_check_load_with_base_sep
# CHECK: bb.0.entry:
-# CHECK: %rsi = ADD64rr %rsi, %rdi, implicit-def dead %eflags
-# CHECK-NEXT: %esi = FAULTING_OP 1, %bb.2, {{[0-9]+}}, %esi, %rdi, 1, %noreg, 0, %noreg, implicit-def %eflags
+# CHECK: $rsi = ADD64rr $rsi, $rdi, implicit-def dead $eflags
+# CHECK-NEXT: $esi = FAULTING_OP 1, %bb.2, {{[0-9]+}}, $esi, $rdi, 1, $noreg, 0, $noreg, implicit-def $eflags
# CHECK-NEXT: JMP_1 %bb.1
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rsi, %rdi
+ liveins: $rsi, $rdi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.1, implicit %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.1, implicit $eflags
bb.2.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- %rsi = ADD64rr %rsi, %rdi, implicit-def dead %eflags
- %esi = AND32rm killed %esi, %rdi, 1, %noreg, 0, %noreg, implicit-def dead %eflags
- %eax = MOV32rr %esi
- RETQ %eax
+ $rsi = ADD64rr $rsi, $rdi, implicit-def dead $eflags
+ $esi = AND32rm killed $esi, $rdi, 1, $noreg, 0, $noreg, implicit-def dead $eflags
+ $eax = MOV32rr $esi
+ RETQ $eax
bb.1.is_null:
- %eax = MOV32ri 42
- RETQ %eax
+ $eax = MOV32ri 42
+ RETQ $eax
...
---
name: inc_store
# CHECK-LABEL: name: inc_store
# CHECK: bb.0.entry:
-# CHECK: %noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, %rsi
+# CHECK: $noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 0, $noreg, $rsi
# CHECK-NEXT: JMP_1 %bb.1
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- MOV64mr killed %rdi, 1, %noreg, 0, %noreg, killed %rsi
+ MOV64mr killed $rdi, 1, $noreg, 0, $noreg, killed $rsi
RETQ
bb.2.is_null:
@@ -794,26 +794,26 @@
name: inc_store_plus_offset
# CHECK-LABEL: inc_store_plus_offset
# CHECK: bb.0.entry:
-# CHECK: %noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %rsi
+# CHECK: $noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 16, $noreg, $rsi
# CHECK-NEXT: JMP_1 %bb.1
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- MOV64mr killed %rdi, 1, %noreg, 16, %noreg, killed %rsi
+ MOV64mr killed $rdi, 1, $noreg, 16, $noreg, killed $rsi
RETQ
bb.2.is_null:
@@ -824,28 +824,28 @@
name: inc_store_with_dep
# CHECK-LABEL: inc_store_with_dep
# CHECK: bb.0.entry:
-# CHECK: %esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags
-# CHECK-NEXT: %noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %esi
+# CHECK: $esi = ADD32rr killed $esi, killed $esi, implicit-def dead $eflags
+# CHECK-NEXT: $noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 16, $noreg, $esi
# CHECK-NEXT: JMP_1 %bb.1
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- %esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags
- MOV32mr killed %rdi, 1, %noreg, 16, %noreg, killed %esi
+ $esi = ADD32rr killed $esi, killed $esi, implicit-def dead $eflags
+ MOV32mr killed $rdi, 1, $noreg, 16, $noreg, killed $esi
RETQ
bb.2.is_null:
@@ -856,61 +856,61 @@
name: inc_store_with_dep_in_null
# CHECK-LABEL: inc_store_with_dep_in_null
# CHECK: bb.0.entry:
-# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
+# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- %esi = ADD32rr %esi, %esi, implicit-def dead %eflags
- MOV32mr killed %rdi, 1, %noreg, 0, %noreg, %esi
- %eax = MOV32rr killed %esi
- RETQ %eax
+ $esi = ADD32rr $esi, $esi, implicit-def dead $eflags
+ MOV32mr killed $rdi, 1, $noreg, 0, $noreg, $esi
+ $eax = MOV32rr killed $esi
+ RETQ $eax
bb.2.is_null:
- liveins: %rsi
+ liveins: $rsi
- %eax = MOV32rr killed %esi
- RETQ %eax
+ $eax = MOV32rr killed $esi
+ RETQ $eax
...
---
name: inc_store_with_volatile
# CHECK-LABEL: inc_store_with_volatile
# CHECK: bb.0.entry:
-# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
+# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- MOV32mr killed %rdi, 1, %noreg, 0, %noreg, killed %esi :: (volatile store 4 into %ir.ptr)
+ MOV32mr killed $rdi, 1, $noreg, 0, $noreg, killed $esi :: (volatile store 4 into %ir.ptr)
RETQ
bb.2.is_null:
@@ -921,28 +921,28 @@
name: inc_store_with_two_dep
# CHECK-LABEL: inc_store_with_two_dep
# CHECK: bb.0.entry:
-# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
+# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- %esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags
- %esi = ADD32ri killed %esi, 15, implicit-def dead %eflags
- MOV32mr killed %rdi, 1, %noreg, 16, %noreg, killed %esi
+ $esi = ADD32rr killed $esi, killed $esi, implicit-def dead $eflags
+ $esi = ADD32ri killed $esi, 15, implicit-def dead $eflags
+ MOV32mr killed $rdi, 1, $noreg, 16, $noreg, killed $esi
RETQ
bb.2.is_null:
@@ -953,27 +953,27 @@
name: inc_store_with_redefined_base
# CHECK-LABEL: inc_store_with_redefined_base
# CHECK: bb.0.entry:
-# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
+# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- %rdi = ADD64rr killed %rdi, killed %rdi, implicit-def dead %eflags
- MOV32mr killed %rdi, 1, %noreg, 16, %noreg, killed %esi
+ $rdi = ADD64rr killed $rdi, killed $rdi, implicit-def dead $eflags
+ MOV32mr killed $rdi, 1, $noreg, 16, $noreg, killed $esi
RETQ
bb.2.is_null:
@@ -984,198 +984,198 @@
name: inc_store_with_reused_base
# CHECK-LABEL: inc_store_with_reused_base
# CHECK: bb.0.entry:
-# CHECK: %noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 16, %noreg, %esi
+# CHECK: $noreg = FAULTING_OP 3, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 16, $noreg, $esi
# CHECK-NEXT: JMP_1 %bb.1
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- %rax = MOV64rr %rdi
- MOV32mr killed %rdi, 1, %noreg, 16, %noreg, killed %esi
- RETQ %eax
+ $rax = MOV64rr $rdi
+ MOV32mr killed $rdi, 1, $noreg, 16, $noreg, killed $esi
+ RETQ $eax
bb.2.is_null:
- %rax = XOR64rr undef %rax, undef %rax, implicit-def dead %eflags
- RETQ %eax
+ $rax = XOR64rr undef $rax, undef $rax, implicit-def dead $eflags
+ RETQ $eax
...
---
name: inc_store_across_call
# CHECK-LABEL: inc_store_across_call
# CHECK: bb.0.entry:
-# CHECK: TEST64rr %rbx, %rbx, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
+# CHECK: TEST64rr $rbx, $rbx, implicit-def $eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
-calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx',
- '%rbp', '%rbx', '%r12', '%r13', '%r14', '%r15',
- '%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d',
- '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ]
+ - { reg: '$rdi' }
+calleeSavedRegisters: [ '$bh', '$bl', '$bp', '$bpl', '$bx', '$ebp', '$ebx',
+ '$rbp', '$rbx', '$r12', '$r13', '$r14', '$r15',
+ '$r12b', '$r13b', '$r14b', '$r15b', '$r12d', '$r13d',
+ '$r14d', '$r15d', '$r12w', '$r13w', '$r14w', '$r15w' ]
body: |
bb.0.entry:
- liveins: %rdi, %rbx
+ liveins: $rdi, $rbx
- frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
+ frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
CFI_INSTRUCTION def_cfa_offset 16
- CFI_INSTRUCTION offset %rbx, -16
- %rbx = MOV64rr killed %rdi
- TEST64rr %rbx, %rbx, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ CFI_INSTRUCTION offset $rbx, -16
+ $rbx = MOV64rr killed $rdi
+ TEST64rr $rbx, $rbx, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rbx
+ liveins: $rbx
- CALL64pcrel32 @f, csr_64, implicit %rsp, implicit-def %rsp
- MOV32mi %rbx, 1, %noreg, 0, %noreg, 20
- %rax = MOV64rr killed %rbx
- %rbx = POP64r implicit-def %rsp, implicit %rsp
- RETQ %eax
+ CALL64pcrel32 @f, csr_64, implicit $rsp, implicit-def $rsp
+ MOV32mi $rbx, 1, $noreg, 0, $noreg, 20
+ $rax = MOV64rr killed $rbx
+ $rbx = POP64r implicit-def $rsp, implicit $rsp
+ RETQ $eax
bb.2.is_null:
- %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
- %rbx = POP64r implicit-def %rsp, implicit %rsp
- RETQ %eax
+ $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags
+ $rbx = POP64r implicit-def $rsp, implicit $rsp
+ RETQ $eax
...
---
name: inc_store_with_dep_in_dep
# CHECK-LABEL: inc_store_with_dep_in_dep
# CHECK: bb.0.entry:
-# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
+# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- %eax = MOV32rr %esi
- %esi = ADD32ri killed %esi, 15, implicit-def dead %eflags
- MOV32mr killed %rdi, 1, %noreg, 0, %noreg, killed %esi
- RETQ %eax
+ $eax = MOV32rr $esi
+ $esi = ADD32ri killed $esi, 15, implicit-def dead $eflags
+ MOV32mr killed $rdi, 1, $noreg, 0, $noreg, killed $esi
+ RETQ $eax
bb.2.is_null:
- %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
- RETQ %eax
+ $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags
+ RETQ $eax
...
---
name: inc_store_with_load_over_store
# CHECK-LABEL: inc_store_with_load_over_store
# CHECK: bb.0.entry:
-# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
+# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- MOV32mi killed %rsi, 1, %noreg, 0, %noreg, 2
- %eax = MOV32rm killed %rdi, 1, %noreg, 0, %noreg
- RETQ %eax
+ MOV32mi killed $rsi, 1, $noreg, 0, $noreg, 2
+ $eax = MOV32rm killed $rdi, 1, $noreg, 0, $noreg
+ RETQ $eax
bb.2.is_null:
- %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
- RETQ %eax
+ $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags
+ RETQ $eax
...
---
name: inc_store_with_store_over_load
# CHECK-LABEL: inc_store_with_store_over_load
# CHECK: bb.0.entry:
-# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
+# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- %eax = MOV32rm killed %rsi, 1, %noreg, 0, %noreg
- MOV32mi killed %rdi, 1, %noreg, 0, %noreg, 2
- RETQ %eax
+ $eax = MOV32rm killed $rsi, 1, $noreg, 0, $noreg
+ MOV32mi killed $rdi, 1, $noreg, 0, $noreg, 2
+ RETQ $eax
bb.2.is_null:
- %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
- RETQ %eax
+ $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags
+ RETQ $eax
...
---
name: inc_store_with_store_over_store
# CHECK-LABEL: inc_store_with_store_over_store
# CHECK: bb.0.entry:
-# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
+# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- MOV32mi killed %rsi, 1, %noreg, 0, %noreg, 3
- MOV32mi killed %rdi, 1, %noreg, 0, %noreg, 2
+ MOV32mi killed $rsi, 1, $noreg, 0, $noreg, 3
+ MOV32mi killed $rdi, 1, $noreg, 0, $noreg, 2
RETQ
bb.2.is_null:
@@ -1186,27 +1186,27 @@
name: inc_store_with_load_and_store
# CHECK-LABEL: inc_store_with_load_and_store
# CHECK: bb.0.entry:
-# CHECK: %noreg = FAULTING_OP 2, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg, %esi, implicit-def %eflags
+# CHECK: $noreg = FAULTING_OP 2, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 0, $noreg, $esi, implicit-def $eflags
# CHECK-NEXT: JMP_1 %bb.1
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- %esi = ADD32rr %esi, %esi, implicit-def dead %eflags
- ADD32mr killed %rdi, 1, %noreg, 0, %noreg, killed %esi, implicit-def dead %eflags
+ $esi = ADD32rr $esi, $esi, implicit-def dead $eflags
+ ADD32mr killed $rdi, 1, $noreg, 0, $noreg, killed $esi, implicit-def dead $eflags
RETQ
bb.2.is_null:
@@ -1217,72 +1217,72 @@
name: inc_store_and_load_no_alias
# CHECK-LABEL: inc_store_and_load_no_alias
# CHECK: bb.0.entry:
-# CHECK: %eax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
+# CHECK: $eax = FAULTING_OP 1, %bb.2, {{[0-9]+}}, $rdi, 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr)
# CHECK-NEXT: JMP_1 %bb.1
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- MOV32mi killed %rsi, 1, %noreg, 0, %noreg, 3 :: (store 4 into %ir.ptr2)
- %eax = MOV32rm killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
- RETQ %eax
+ MOV32mi killed $rsi, 1, $noreg, 0, $noreg, 3 :: (store 4 into %ir.ptr2)
+ $eax = MOV32rm killed $rdi, 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr)
+ RETQ $eax
bb.2.is_null:
- %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
- RETQ %eax
+ $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags
+ RETQ $eax
...
---
name: inc_store_and_load_alias
# CHECK-LABEL: inc_store_and_load_alias
# CHECK: bb.0.entry:
-# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
+# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags
# CHECK: bb.1.not_null
alignment: 4
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- MOV32mi killed %rsi, 1, %noreg, 0, %noreg, 3 :: (store 4 into %ir.ptr2)
- %eax = MOV32rm killed %rdi, 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr)
- RETQ %eax
+ MOV32mi killed $rsi, 1, $noreg, 0, $noreg, 3 :: (store 4 into %ir.ptr2)
+ $eax = MOV32rm killed $rdi, 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr)
+ RETQ $eax
bb.2.is_null:
- %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
- RETQ %eax
+ $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags
+ RETQ $eax
...
---
name: inc_spill_dep
# CHECK-LABEL: inc_spill_dep
# CHECK: bb.0.entry:
-# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
-# CHECK-NEXT: JE_1 %bb.2, implicit killed %eflags
+# CHECK: TEST64rr $rdi, $rdi, implicit-def $eflags
+# CHECK-NEXT: JE_1 %bb.2, implicit killed $eflags
# CHECK: bb.1.not_null
alignment: 4
@@ -1290,28 +1290,28 @@
stack:
- { id: 0, type: spill-slot, offset: -8, size: 8, alignment: 8}
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0.entry:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- %rsp = frame-setup SUB64ri8 %rsp, 8, implicit-def dead %eflags
- MOV32mr %rsp, 1, %noreg, 0, %noreg, %esi :: (store 4 into %stack.0)
- TEST64rr %rdi, %rdi, implicit-def %eflags
- JE_1 %bb.2, implicit killed %eflags
+ $rsp = frame-setup SUB64ri8 $rsp, 8, implicit-def dead $eflags
+ MOV32mr $rsp, 1, $noreg, 0, $noreg, $esi :: (store 4 into %stack.0)
+ TEST64rr $rdi, $rdi, implicit-def $eflags
+ JE_1 %bb.2, implicit killed $eflags
bb.1.not_null:
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- %r14d = MOV32rm %rsp, 1, %noreg, 0, %noreg :: (load 4 from %stack.0)
- MOV64mr %rsp, 1, %noreg, 0, %noreg, %rdi :: (store 8 into %stack.0)
- %edi = MOV32rm %rdi, 1, %noreg, 8, %noreg :: (load 4 from %ir.ptr)
- %eax = MOV32rr %edi
- RETQ %eax
+ $r14d = MOV32rm $rsp, 1, $noreg, 0, $noreg :: (load 4 from %stack.0)
+ MOV64mr $rsp, 1, $noreg, 0, $noreg, $rdi :: (store 8 into %stack.0)
+ $edi = MOV32rm $rdi, 1, $noreg, 8, $noreg :: (load 4 from %ir.ptr)
+ $eax = MOV32rr $edi
+ RETQ $eax
bb.2.is_null:
- %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
- RETQ %eax
+ $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags
+ RETQ $eax
...
diff --git a/llvm/test/CodeGen/X86/implicit-use-spill.mir b/llvm/test/CodeGen/X86/implicit-use-spill.mir
index 25f245e9..343f0f7 100644
--- a/llvm/test/CodeGen/X86/implicit-use-spill.mir
+++ b/llvm/test/CodeGen/X86/implicit-use-spill.mir
@@ -11,10 +11,10 @@
bb.0:
; CHECK: NOOP implicit-def [[VAL:%[0-9]+]]
; VAL should be spilled before csr_noregs, i.e., before we clobber all the registers
- ; CHECK-NEXT: MOV64mr [[SLOT:%stack.[0-9]+]], 1, %noreg, 0, %noreg, [[VAL]]
+ ; CHECK-NEXT: MOV64mr [[SLOT:%stack.[0-9]+]], 1, $noreg, 0, $noreg, [[VAL]]
; CHECK-NEXT: NOOP csr_noregs
; We need to reload before the (implicit) use.
- ; CHECK-NEXT: [[RELOADED_VAL:%[0-9]+]]:gr64 = MOV64rm [[SLOT]], 1, %noreg, 0, %noreg
+ ; CHECK-NEXT: [[RELOADED_VAL:%[0-9]+]]:gr64 = MOV64rm [[SLOT]], 1, $noreg, 0, $noreg
; CHECK-NEXT: NOOP implicit [[RELOADED_VAL]]
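  ; (For context: csr_noregs models a call-like operation that preserves no
  ; registers, so the MOV64mr/MOV64rm pair checked above is the only way VAL
  ; can survive across it.)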
NOOP implicit-def %0
NOOP csr_noregs
diff --git a/llvm/test/CodeGen/X86/imul.ll b/llvm/test/CodeGen/X86/imul.ll
index ff7df4f..7bb10aa 100644
--- a/llvm/test/CodeGen/X86/imul.ll
+++ b/llvm/test/CodeGen/X86/imul.ll
@@ -218,7 +218,7 @@
define i32 @test2(i32 %a) {
; X64-LABEL: test2:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $5, %eax
; X64-NEXT: leal (%rax,%rdi), %eax
@@ -239,7 +239,7 @@
define i32 @test3(i32 %a) {
; X64-LABEL: test3:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $5, %eax
; X64-NEXT: leal (%rax,%rdi), %eax
diff --git a/llvm/test/CodeGen/X86/invalid-liveness.mir b/llvm/test/CodeGen/X86/invalid-liveness.mir
index 47db809..dc939a5 100644
--- a/llvm/test/CodeGen/X86/invalid-liveness.mir
+++ b/llvm/test/CodeGen/X86/invalid-liveness.mir
@@ -16,7 +16,7 @@
- { id: 0, class: gr32 }
body: |
bb.0:
- JG_1 %bb.2, implicit %eflags
+ JG_1 %bb.2, implicit $eflags
JMP_1 %bb.3
bb.2:
@@ -24,6 +24,6 @@
JMP_1 %bb.3
bb.3:
- %eax = COPY %0
- RETQ %eax
+ $eax = COPY %0
+ RETQ $eax
...
diff --git a/llvm/test/CodeGen/X86/ipra-inline-asm.ll b/llvm/test/CodeGen/X86/ipra-inline-asm.ll
index 4b56c3a..dc9cbfa 100644
--- a/llvm/test/CodeGen/X86/ipra-inline-asm.ll
+++ b/llvm/test/CodeGen/X86/ipra-inline-asm.ll
@@ -11,7 +11,7 @@
}
; Verifies that inline assembly is correctly handled by giving a list of clobbered registers
-; CHECK: foo Clobbered Registers: %ah %al %ax %ch %cl %cx %di %dil %eax %ecx %edi %rax %rcx %rdi
+; CHECK: foo Clobbered Registers: $ah $al $ax $ch $cl $cx $di $dil $eax $ecx $edi $rax $rcx $rdi
define void @foo() #0 {
call void asm sideeffect "", "~{eax},~{ecx},~{edi}"() #0
ret void
diff --git a/llvm/test/CodeGen/X86/ipra-reg-alias.ll b/llvm/test/CodeGen/X86/ipra-reg-alias.ll
index c5c3607..86bba7c 100644
--- a/llvm/test/CodeGen/X86/ipra-reg-alias.ll
+++ b/llvm/test/CodeGen/X86/ipra-reg-alias.ll
@@ -6,7 +6,7 @@
%inc2 = mul i8 %inc, 5
; Here only CL is clobbered so CH should not be clobbered, but CX, ECX and RCX
; should be clobbered.
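; (For context: CL is the low byte of CX/ECX/RCX, while CH is a separate
; high-byte alias, so writing CL dirties the wider registers but leaves CH
; untouched.)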
-; CHECK: main Clobbered Registers: %ah %al %ax %cl %cx %eax %ecx %eflags %rax %rcx
+; CHECK: main Clobbered Registers: $ah $al $ax $cl $cx $eax $ecx $eflags $rax $rcx
ret i8 %inc2
}
diff --git a/llvm/test/CodeGen/X86/ipra-reg-usage.ll b/llvm/test/CodeGen/X86/ipra-reg-usage.ll
index 50c066d..4930526 100644
--- a/llvm/test/CodeGen/X86/ipra-reg-usage.ll
+++ b/llvm/test/CodeGen/X86/ipra-reg-usage.ll
@@ -3,7 +3,7 @@
target triple = "x86_64-unknown-unknown"
declare void @bar1()
define preserve_allcc void @foo()#0 {
-; CHECK: foo Clobbered Registers: %cs %ds %eflags %eip %eiz %es %fpsw %fs %gs %ip %rip %riz %ss %ssp %bnd0 %bnd1 %bnd2 %bnd3 %cr0 %cr1 %cr2 %cr3 %cr4 %cr5 %cr6 %cr7 %cr8 %cr9 %cr10 %cr11 %cr12 %cr13 %cr14 %cr15 %dr0 %dr1 %dr2 %dr3 %dr4 %dr5 %dr6 %dr7 %dr8 %dr9 %dr10 %dr11 %dr12 %dr13 %dr14 %dr15 %fp0 %fp1 %fp2 %fp3 %fp4 %fp5 %fp6 %fp7 %k0 %k1 %k2 %k3 %k4 %k5 %k6 %k7 %mm0 %mm1 %mm2 %mm3 %mm4 %mm5 %mm6 %mm7 %r11 %st0 %st1 %st2 %st3 %st4 %st5 %st6 %st7 %xmm16 %xmm17 %xmm18 %xmm19 %xmm20 %xmm21 %xmm22 %xmm23 %xmm24 %xmm25 %xmm26 %xmm27 %xmm28 %xmm29 %xmm30 %xmm31 %ymm0 %ymm1 %ymm2 %ymm3 %ymm4 %ymm5 %ymm6 %ymm7 %ymm8 %ymm9 %ymm10 %ymm11 %ymm12 %ymm13 %ymm14 %ymm15 %ymm16 %ymm17 %ymm18 %ymm19 %ymm20 %ymm21 %ymm22 %ymm23 %ymm24 %ymm25 %ymm26 %ymm27 %ymm28 %ymm29 %ymm30 %ymm31 %zmm0 %zmm1 %zmm2 %zmm3 %zmm4 %zmm5 %zmm6 %zmm7 %zmm8 %zmm9 %zmm10 %zmm11 %zmm12 %zmm13 %zmm14 %zmm15 %zmm16 %zmm17 %zmm18 %zmm19 %zmm20 %zmm21 %zmm22 %zmm23 %zmm24 %zmm25 %zmm26 %zmm27 %zmm28 %zmm29 %zmm30 %zmm31 %r11b %r11d %r11w
+; CHECK: foo Clobbered Registers: $cs $ds $eflags $eip $eiz $es $fpsw $fs $gs $ip $rip $riz $ss $ssp $bnd0 $bnd1 $bnd2 $bnd3 $cr0 $cr1 $cr2 $cr3 $cr4 $cr5 $cr6 $cr7 $cr8 $cr9 $cr10 $cr11 $cr12 $cr13 $cr14 $cr15 $dr0 $dr1 $dr2 $dr3 $dr4 $dr5 $dr6 $dr7 $dr8 $dr9 $dr10 $dr11 $dr12 $dr13 $dr14 $dr15 $fp0 $fp1 $fp2 $fp3 $fp4 $fp5 $fp6 $fp7 $k0 $k1 $k2 $k3 $k4 $k5 $k6 $k7 $mm0 $mm1 $mm2 $mm3 $mm4 $mm5 $mm6 $mm7 $r11 $st0 $st1 $st2 $st3 $st4 $st5 $st6 $st7 $xmm16 $xmm17 $xmm18 $xmm19 $xmm20 $xmm21 $xmm22 $xmm23 $xmm24 $xmm25 $xmm26 $xmm27 $xmm28 $xmm29 $xmm30 $xmm31 $ymm0 $ymm1 $ymm2 $ymm3 $ymm4 $ymm5 $ymm6 $ymm7 $ymm8 $ymm9 $ymm10 $ymm11 $ymm12 $ymm13 $ymm14 $ymm15 $ymm16 $ymm17 $ymm18 $ymm19 $ymm20 $ymm21 $ymm22 $ymm23 $ymm24 $ymm25 $ymm26 $ymm27 $ymm28 $ymm29 $ymm30 $ymm31 $zmm0 $zmm1 $zmm2 $zmm3 $zmm4 $zmm5 $zmm6 $zmm7 $zmm8 $zmm9 $zmm10 $zmm11 $zmm12 $zmm13 $zmm14 $zmm15 $zmm16 $zmm17 $zmm18 $zmm19 $zmm20 $zmm21 $zmm22 $zmm23 $zmm24 $zmm25 $zmm26 $zmm27 $zmm28 $zmm29 $zmm30 $zmm31 $r11b $r11d $r11w
call void @bar1()
call void @bar2()
ret void
diff --git a/llvm/test/CodeGen/X86/lea-3.ll b/llvm/test/CodeGen/X86/lea-3.ll
index f32c782..26a3944 100644
--- a/llvm/test/CodeGen/X86/lea-3.ll
+++ b/llvm/test/CodeGen/X86/lea-3.ll
@@ -36,25 +36,25 @@
define i32 @test(i32 %a) {
; LNX1-LABEL: test:
; LNX1: # %bb.0:
-; LNX1-NEXT: # kill: def %edi killed %edi def %rdi
+; LNX1-NEXT: # kill: def $edi killed $edi def $rdi
; LNX1-NEXT: leal (%rdi,%rdi,2), %eax
; LNX1-NEXT: retq
;
; LNX2-LABEL: test:
; LNX2: # %bb.0:
-; LNX2-NEXT: # kill: def %edi killed %edi def %rdi
+; LNX2-NEXT: # kill: def $edi killed $edi def $rdi
; LNX2-NEXT: leal (%rdi,%rdi,2), %eax
; LNX2-NEXT: retq
;
; NACL-LABEL: test:
; NACL: # %bb.0:
-; NACL-NEXT: # kill: def %edi killed %edi def %rdi
+; NACL-NEXT: # kill: def $edi killed $edi def $rdi
; NACL-NEXT: leal (%rdi,%rdi,2), %eax
; NACL-NEXT: retq
;
; WIN-LABEL: test:
; WIN: # %bb.0:
-; WIN-NEXT: # kill: def %ecx killed %ecx def %rcx
+; WIN-NEXT: # kill: def $ecx killed $ecx def $rcx
; WIN-NEXT: leal (%rcx,%rcx,2), %eax
; WIN-NEXT: retq
%tmp2 = mul i32 %a, 3
diff --git a/llvm/test/CodeGen/X86/lea-opt-cse3.ll b/llvm/test/CodeGen/X86/lea-opt-cse3.ll
index d0b5a28..fb7fe12 100644
--- a/llvm/test/CodeGen/X86/lea-opt-cse3.ll
+++ b/llvm/test/CodeGen/X86/lea-opt-cse3.ll
@@ -5,8 +5,8 @@
define i32 @foo(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-LABEL: foo:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal 4(%rdi,%rsi,2), %ecx
; X64-NEXT: leal 4(%rdi,%rsi,4), %eax
; X64-NEXT: imull %ecx, %eax
@@ -33,8 +33,8 @@
define i32 @foo1(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-LABEL: foo1:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal 4(%rdi,%rsi,4), %ecx
; X64-NEXT: leal 4(%rdi,%rsi,8), %eax
; X64-NEXT: imull %ecx, %eax
@@ -61,8 +61,8 @@
define i32 @foo1_mult_basic_blocks(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-LABEL: foo1_mult_basic_blocks:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal 4(%rdi,%rsi,4), %ecx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl $10, %ecx
@@ -113,8 +113,8 @@
define i32 @foo1_mult_basic_blocks_illegal_scale(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-LABEL: foo1_mult_basic_blocks_illegal_scale:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal 4(%rdi,%rsi,2), %ecx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl $10, %ecx
diff --git a/llvm/test/CodeGen/X86/lea-opt-with-debug.mir b/llvm/test/CodeGen/X86/lea-opt-with-debug.mir
index 61e4069..c9ac04d 100644
--- a/llvm/test/CodeGen/X86/lea-opt-with-debug.mir
+++ b/llvm/test/CodeGen/X86/lea-opt-with-debug.mir
@@ -95,28 +95,28 @@
bb.0 (%ir-block.0):
successors: %bb.1(0x80000000)
- ; CHECK: %3:gr64_nosp = LEA64r %2, 2, %2, 0, %noreg, debug-location !13
- ; CHECK-NEXT: %4:gr64 = LEA64r %1, 4, %3, 0, %noreg, debug-location !13
- ; CHECK-NOT: %0:gr64 = LEA64r %1, 4, %3, 8, %noreg, debug-location !14
- ; CHECK: DBG_VALUE debug-use %4, debug-use %noreg, !11, !DIExpression(DW_OP_plus_uconst, 8, DW_OP_stack_value), debug-location !15
+ ; CHECK: %3:gr64_nosp = LEA64r %2, 2, %2, 0, $noreg, debug-location !13
+ ; CHECK-NEXT: %4:gr64 = LEA64r %1, 4, %3, 0, $noreg, debug-location !13
+ ; CHECK-NOT: %0:gr64 = LEA64r %1, 4, %3, 8, $noreg, debug-location !14
+ ; CHECK: DBG_VALUE debug-use %4, debug-use $noreg, !11, !DIExpression(DW_OP_plus_uconst, 8, DW_OP_stack_value), debug-location !15
- %1 = MOV64rm %rip, 1, %noreg, @c, %noreg, debug-location !13 :: (dereferenceable load 8 from @c)
- %2 = MOVSX64rm32 %rip, 1, %noreg, @a, %noreg, debug-location !13 :: (dereferenceable load 4 from @a)
- %3 = LEA64r %2, 2, %2, 0, %noreg, debug-location !13
- %4 = LEA64r %1, 4, %3, 0, %noreg, debug-location !13
+ %1 = MOV64rm $rip, 1, $noreg, @c, $noreg, debug-location !13 :: (dereferenceable load 8 from @c)
+ %2 = MOVSX64rm32 $rip, 1, $noreg, @a, $noreg, debug-location !13 :: (dereferenceable load 4 from @a)
+ %3 = LEA64r %2, 2, %2, 0, $noreg, debug-location !13
+ %4 = LEA64r %1, 4, %3, 0, $noreg, debug-location !13
%5 = COPY %4.sub_32bit, debug-location !13
- MOV32mr %rip, 1, %noreg, @d, %noreg, killed %5, debug-location !13 :: (store 4 into @d)
- %0 = LEA64r %1, 4, %3, 8, %noreg, debug-location !14
- DBG_VALUE debug-use %0, debug-use %noreg, !11, !DIExpression(), debug-location !15
+ MOV32mr $rip, 1, $noreg, @d, $noreg, killed %5, debug-location !13 :: (store 4 into @d)
+ %0 = LEA64r %1, 4, %3, 8, $noreg, debug-location !14
+ DBG_VALUE debug-use %0, debug-use $noreg, !11, !DIExpression(), debug-location !15
; CHECK-LABEL: bb.1 (%ir-block.8):
- ; CHECK: %6:gr32 = MOV32rm %4, 1, %noreg, 8, %noreg, debug-location !17 :: (load 4 from %ir.7)
+ ; CHECK: %6:gr32 = MOV32rm %4, 1, $noreg, 8, $noreg, debug-location !17 :: (load 4 from %ir.7)
bb.1 (%ir-block.8):
successors: %bb.1(0x80000000)
- %6 = MOV32rm %0, 1, %noreg, 0, %noreg, debug-location !17 :: (load 4 from %ir.7)
- MOV32mr %rip, 1, %noreg, @d, %noreg, killed %6, debug-location !17 :: (store 4 into @d)
+ %6 = MOV32rm %0, 1, $noreg, 0, $noreg, debug-location !17 :: (load 4 from %ir.7)
+ MOV32mr $rip, 1, $noreg, @d, $noreg, killed %6, debug-location !17 :: (store 4 into @d)
JMP_1 %bb.1, debug-location !18
...
diff --git a/llvm/test/CodeGen/X86/lea32-schedule.ll b/llvm/test/CodeGen/X86/lea32-schedule.ll
index b89ba4a..6d92673 100644
--- a/llvm/test/CodeGen/X86/lea32-schedule.ll
+++ b/llvm/test/CodeGen/X86/lea32-schedule.ll
@@ -14,13 +14,13 @@
define i32 @test_lea_offset(i32) {
; GENERIC-LABEL: test_lea_offset:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
+; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi
; GENERIC-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_offset:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
+; ATOM-NEXT: # kill: def $edi killed $edi def $rdi
; ATOM-NEXT: leal -24(%rdi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -32,43 +32,43 @@
;
; SLM-LABEL: test_lea_offset:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NEXT: leal -24(%rdi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_offset:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
+; SANDY-NEXT: # kill: def $edi killed $edi def $rdi
; SANDY-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_offset:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi
; HASWELL-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [7:1.00]
;
; BROADWELL-LABEL: test_lea_offset:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi
; BROADWELL-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_offset:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
+; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi
; SKYLAKE-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_offset:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
+; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi
; BTVER2-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_offset:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
+; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi
; ZNVER1-NEXT: leal -24(%rdi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = add nsw i32 %0, -24
@@ -78,13 +78,13 @@
define i32 @test_lea_offset_big(i32) {
; GENERIC-LABEL: test_lea_offset_big:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
+; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi
; GENERIC-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_offset_big:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
+; ATOM-NEXT: # kill: def $edi killed $edi def $rdi
; ATOM-NEXT: leal 1024(%rdi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -96,43 +96,43 @@
;
; SLM-LABEL: test_lea_offset_big:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NEXT: leal 1024(%rdi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_offset_big:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
+; SANDY-NEXT: # kill: def $edi killed $edi def $rdi
; SANDY-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_offset_big:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi
; HASWELL-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [7:1.00]
;
; BROADWELL-LABEL: test_lea_offset_big:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi
; BROADWELL-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_offset_big:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
+; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi
; SKYLAKE-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_offset_big:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
+; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi
; BTVER2-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_offset_big:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
+; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi
; ZNVER1-NEXT: leal 1024(%rdi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = add nsw i32 %0, 1024
@@ -143,15 +143,15 @@
define i32 @test_lea_add(i32, i32) {
; GENERIC-LABEL: test_lea_add:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi
-; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
+; GENERIC-NEXT: # kill: def $esi killed $esi def $rsi
+; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi
; GENERIC-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: def %esi killed %esi def %rsi
-; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
+; ATOM-NEXT: # kill: def $esi killed $esi def $rsi
+; ATOM-NEXT: # kill: def $edi killed $edi def $rdi
; ATOM-NEXT: leal (%rdi,%rsi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -163,50 +163,50 @@
;
; SLM-LABEL: test_lea_add:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: def %esi killed %esi def %rsi
-; SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NEXT: # kill: def $esi killed $esi def $rsi
+; SLM-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NEXT: leal (%rdi,%rsi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: def %esi killed %esi def %rsi
-; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
+; SANDY-NEXT: # kill: def $esi killed $esi def $rsi
+; SANDY-NEXT: # kill: def $edi killed $edi def $rdi
; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi
-; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; HASWELL-NEXT: # kill: def $esi killed $esi def $rsi
+; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi
; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [7:1.00]
;
; BROADWELL-LABEL: test_lea_add:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi
-; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; BROADWELL-NEXT: # kill: def $esi killed $esi def $rsi
+; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi
; BROADWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi
-; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
+; SKYLAKE-NEXT: # kill: def $esi killed $esi def $rsi
+; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi
; SKYLAKE-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi
-; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
+; BTVER2-NEXT: # kill: def $esi killed $esi def $rsi
+; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi
; BTVER2-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi
-; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
+; ZNVER1-NEXT: # kill: def $esi killed $esi def $rsi
+; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi
; ZNVER1-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add nsw i32 %1, %0
@@ -216,16 +216,16 @@
define i32 @test_lea_add_offset(i32, i32) {
; GENERIC-LABEL: test_lea_add_offset:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi
-; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
+; GENERIC-NEXT: # kill: def $esi killed $esi def $rsi
+; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi
; GENERIC-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $16, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_offset:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: def %esi killed %esi def %rsi
-; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
+; ATOM-NEXT: # kill: def $esi killed $esi def $rsi
+; ATOM-NEXT: # kill: def $edi killed $edi def $rdi
; ATOM-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -237,54 +237,54 @@
;
; SLM-LABEL: test_lea_add_offset:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: def %esi killed %esi def %rsi
-; SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NEXT: # kill: def $esi killed $esi def $rsi
+; SLM-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_offset:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: def %esi killed %esi def %rsi
-; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
+; SANDY-NEXT: # kill: def $esi killed $esi def $rsi
+; SANDY-NEXT: # kill: def $edi killed $edi def $rdi
; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $16, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_offset:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi
-; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; HASWELL-NEXT: # kill: def $esi killed $esi def $rsi
+; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi
; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $16, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [7:1.00]
;
; BROADWELL-LABEL: test_lea_add_offset:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi
-; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; BROADWELL-NEXT: # kill: def $esi killed $esi def $rsi
+; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi
; BROADWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $16, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_offset:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi
-; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
+; SKYLAKE-NEXT: # kill: def $esi killed $esi def $rsi
+; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi
; SKYLAKE-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $16, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_offset:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi
-; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
+; BTVER2-NEXT: # kill: def $esi killed $esi def $rsi
+; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi
; BTVER2-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_offset:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi
-; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
+; ZNVER1-NEXT: # kill: def $esi killed $esi def $rsi
+; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi
; ZNVER1-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add i32 %0, 16
@@ -295,8 +295,8 @@
define i32 @test_lea_add_offset_big(i32, i32) {
; GENERIC-LABEL: test_lea_add_offset_big:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi
-; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
+; GENERIC-NEXT: # kill: def $esi killed $esi def $rsi
+; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi
; GENERIC-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $-4096, %eax # imm = 0xF000
; GENERIC-NEXT: # sched: [1:0.33]
@@ -304,8 +304,8 @@
;
; ATOM-LABEL: test_lea_add_offset_big:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: def %esi killed %esi def %rsi
-; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
+; ATOM-NEXT: # kill: def $esi killed $esi def $rsi
+; ATOM-NEXT: # kill: def $edi killed $edi def $rdi
; ATOM-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -317,15 +317,15 @@
;
; SLM-LABEL: test_lea_add_offset_big:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: def %esi killed %esi def %rsi
-; SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NEXT: # kill: def $esi killed $esi def $rsi
+; SLM-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_offset_big:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: def %esi killed %esi def %rsi
-; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
+; SANDY-NEXT: # kill: def $esi killed $esi def $rsi
+; SANDY-NEXT: # kill: def $edi killed $edi def $rdi
; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $-4096, %eax # imm = 0xF000
; SANDY-NEXT: # sched: [1:0.33]
@@ -333,8 +333,8 @@
;
; HASWELL-LABEL: test_lea_add_offset_big:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi
-; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; HASWELL-NEXT: # kill: def $esi killed $esi def $rsi
+; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi
; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $-4096, %eax # imm = 0xF000
; HASWELL-NEXT: # sched: [1:0.25]
@@ -342,8 +342,8 @@
;
; BROADWELL-LABEL: test_lea_add_offset_big:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi
-; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; BROADWELL-NEXT: # kill: def $esi killed $esi def $rsi
+; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi
; BROADWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $-4096, %eax # imm = 0xF000
; BROADWELL-NEXT: # sched: [1:0.25]
@@ -351,8 +351,8 @@
;
; SKYLAKE-LABEL: test_lea_add_offset_big:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi
-; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
+; SKYLAKE-NEXT: # kill: def $esi killed $esi def $rsi
+; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi
; SKYLAKE-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $-4096, %eax # imm = 0xF000
; SKYLAKE-NEXT: # sched: [1:0.25]
@@ -360,15 +360,15 @@
;
; BTVER2-LABEL: test_lea_add_offset_big:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi
-; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
+; BTVER2-NEXT: # kill: def $esi killed $esi def $rsi
+; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi
; BTVER2-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_offset_big:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi
-; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
+; ZNVER1-NEXT: # kill: def $esi killed $esi def $rsi
+; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi
; ZNVER1-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add i32 %0, -4096
@@ -379,13 +379,13 @@
define i32 @test_lea_mul(i32) {
; GENERIC-LABEL: test_lea_mul:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
+; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi
; GENERIC-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_mul:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
+; ATOM-NEXT: # kill: def $edi killed $edi def $rdi
; ATOM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -397,43 +397,43 @@
;
; SLM-LABEL: test_lea_mul:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_mul:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
+; SANDY-NEXT: # kill: def $edi killed $edi def $rdi
; SANDY-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_mul:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi
; HASWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [7:1.00]
;
; BROADWELL-LABEL: test_lea_mul:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi
; BROADWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_mul:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
+; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi
; SKYLAKE-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_mul:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
+; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi
; BTVER2-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_mul:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
+; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi
; ZNVER1-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i32 %0, 3
@@ -443,14 +443,14 @@
define i32 @test_lea_mul_offset(i32) {
; GENERIC-LABEL: test_lea_mul_offset:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
+; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi
; GENERIC-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $-32, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_mul_offset:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
+; ATOM-NEXT: # kill: def $edi killed $edi def $rdi
; ATOM-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -462,47 +462,47 @@
;
; SLM-LABEL: test_lea_mul_offset:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_mul_offset:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
+; SANDY-NEXT: # kill: def $edi killed $edi def $rdi
; SANDY-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $-32, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_mul_offset:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi
; HASWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $-32, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [7:1.00]
;
; BROADWELL-LABEL: test_lea_mul_offset:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi
; BROADWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $-32, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_mul_offset:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
+; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi
; SKYLAKE-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $-32, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_mul_offset:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
+; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi
; BTVER2-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_mul_offset:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
+; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi
; ZNVER1-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i32 %0, 3
@@ -513,7 +513,7 @@
define i32 @test_lea_mul_offset_big(i32) {
; GENERIC-LABEL: test_lea_mul_offset_big:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
+; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi
; GENERIC-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $10000, %eax # imm = 0x2710
; GENERIC-NEXT: # sched: [1:0.33]
@@ -521,7 +521,7 @@
;
; ATOM-LABEL: test_lea_mul_offset_big:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
+; ATOM-NEXT: # kill: def $edi killed $edi def $rdi
; ATOM-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -533,13 +533,13 @@
;
; SLM-LABEL: test_lea_mul_offset_big:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_mul_offset_big:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
+; SANDY-NEXT: # kill: def $edi killed $edi def $rdi
; SANDY-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $10000, %eax # imm = 0x2710
; SANDY-NEXT: # sched: [1:0.33]
@@ -547,7 +547,7 @@
;
; HASWELL-LABEL: test_lea_mul_offset_big:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi
; HASWELL-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $10000, %eax # imm = 0x2710
; HASWELL-NEXT: # sched: [1:0.25]
@@ -555,7 +555,7 @@
;
; BROADWELL-LABEL: test_lea_mul_offset_big:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi
; BROADWELL-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $10000, %eax # imm = 0x2710
; BROADWELL-NEXT: # sched: [1:0.25]
@@ -563,7 +563,7 @@
;
; SKYLAKE-LABEL: test_lea_mul_offset_big:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
+; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi
; SKYLAKE-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $10000, %eax # imm = 0x2710
; SKYLAKE-NEXT: # sched: [1:0.25]
@@ -571,13 +571,13 @@
;
; BTVER2-LABEL: test_lea_mul_offset_big:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
+; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi
; BTVER2-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_mul_offset_big:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
+; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi
; ZNVER1-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i32 %0, 9
@@ -588,15 +588,15 @@
define i32 @test_lea_add_scale(i32, i32) {
; GENERIC-LABEL: test_lea_add_scale:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi
-; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
+; GENERIC-NEXT: # kill: def $esi killed $esi def $rsi
+; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi
; GENERIC-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_scale:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: def %esi killed %esi def %rsi
-; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
+; ATOM-NEXT: # kill: def $esi killed $esi def $rsi
+; ATOM-NEXT: # kill: def $edi killed $edi def $rdi
; ATOM-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -608,50 +608,50 @@
;
; SLM-LABEL: test_lea_add_scale:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: def %esi killed %esi def %rsi
-; SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NEXT: # kill: def $esi killed $esi def $rsi
+; SLM-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_scale:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: def %esi killed %esi def %rsi
-; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
+; SANDY-NEXT: # kill: def $esi killed $esi def $rsi
+; SANDY-NEXT: # kill: def $edi killed $edi def $rdi
; SANDY-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_scale:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi
-; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; HASWELL-NEXT: # kill: def $esi killed $esi def $rsi
+; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi
; HASWELL-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [7:1.00]
;
; BROADWELL-LABEL: test_lea_add_scale:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi
-; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; BROADWELL-NEXT: # kill: def $esi killed $esi def $rsi
+; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi
; BROADWELL-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_scale:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi
-; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
+; SKYLAKE-NEXT: # kill: def $esi killed $esi def $rsi
+; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi
; SKYLAKE-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_scale:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi
-; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
+; BTVER2-NEXT: # kill: def $esi killed $esi def $rsi
+; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi
; BTVER2-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_scale:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi
-; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
+; ZNVER1-NEXT: # kill: def $esi killed $esi def $rsi
+; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi
; ZNVER1-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i32 %1, 1
@@ -662,16 +662,16 @@
define i32 @test_lea_add_scale_offset(i32, i32) {
; GENERIC-LABEL: test_lea_add_scale_offset:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi
-; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
+; GENERIC-NEXT: # kill: def $esi killed $esi def $rsi
+; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi
; GENERIC-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $96, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_scale_offset:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: def %esi killed %esi def %rsi
-; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
+; ATOM-NEXT: # kill: def $esi killed $esi def $rsi
+; ATOM-NEXT: # kill: def $edi killed $edi def $rdi
; ATOM-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -683,54 +683,54 @@
;
; SLM-LABEL: test_lea_add_scale_offset:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: def %esi killed %esi def %rsi
-; SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NEXT: # kill: def $esi killed $esi def $rsi
+; SLM-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_scale_offset:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: def %esi killed %esi def %rsi
-; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
+; SANDY-NEXT: # kill: def $esi killed $esi def $rsi
+; SANDY-NEXT: # kill: def $edi killed $edi def $rdi
; SANDY-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $96, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_scale_offset:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi
-; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; HASWELL-NEXT: # kill: def $esi killed $esi def $rsi
+; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi
; HASWELL-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $96, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [7:1.00]
;
; BROADWELL-LABEL: test_lea_add_scale_offset:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi
-; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; BROADWELL-NEXT: # kill: def $esi killed $esi def $rsi
+; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi
; BROADWELL-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $96, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_scale_offset:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi
-; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
+; SKYLAKE-NEXT: # kill: def $esi killed $esi def $rsi
+; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi
; SKYLAKE-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $96, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_scale_offset:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi
-; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
+; BTVER2-NEXT: # kill: def $esi killed $esi def $rsi
+; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi
; BTVER2-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_scale_offset:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi
-; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
+; ZNVER1-NEXT: # kill: def $esi killed $esi def $rsi
+; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi
; ZNVER1-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i32 %1, 2
@@ -742,8 +742,8 @@
define i32 @test_lea_add_scale_offset_big(i32, i32) {
; GENERIC-LABEL: test_lea_add_scale_offset_big:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi
-; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
+; GENERIC-NEXT: # kill: def $esi killed $esi def $rsi
+; GENERIC-NEXT: # kill: def $edi killed $edi def $rdi
; GENERIC-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $-1200, %eax # imm = 0xFB50
; GENERIC-NEXT: # sched: [1:0.33]
@@ -751,8 +751,8 @@
;
; ATOM-LABEL: test_lea_add_scale_offset_big:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: def %esi killed %esi def %rsi
-; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
+; ATOM-NEXT: # kill: def $esi killed $esi def $rsi
+; ATOM-NEXT: # kill: def $edi killed $edi def $rdi
; ATOM-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -764,15 +764,15 @@
;
; SLM-LABEL: test_lea_add_scale_offset_big:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: def %esi killed %esi def %rsi
-; SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NEXT: # kill: def $esi killed $esi def $rsi
+; SLM-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_scale_offset_big:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: def %esi killed %esi def %rsi
-; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
+; SANDY-NEXT: # kill: def $esi killed $esi def $rsi
+; SANDY-NEXT: # kill: def $edi killed $edi def $rdi
; SANDY-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $-1200, %eax # imm = 0xFB50
; SANDY-NEXT: # sched: [1:0.33]
@@ -780,8 +780,8 @@
;
; HASWELL-LABEL: test_lea_add_scale_offset_big:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi
-; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; HASWELL-NEXT: # kill: def $esi killed $esi def $rsi
+; HASWELL-NEXT: # kill: def $edi killed $edi def $rdi
; HASWELL-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $-1200, %eax # imm = 0xFB50
; HASWELL-NEXT: # sched: [1:0.25]
@@ -789,8 +789,8 @@
;
; BROADWELL-LABEL: test_lea_add_scale_offset_big:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi
-; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
+; BROADWELL-NEXT: # kill: def $esi killed $esi def $rsi
+; BROADWELL-NEXT: # kill: def $edi killed $edi def $rdi
; BROADWELL-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $-1200, %eax # imm = 0xFB50
; BROADWELL-NEXT: # sched: [1:0.25]
@@ -798,8 +798,8 @@
;
; SKYLAKE-LABEL: test_lea_add_scale_offset_big:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi
-; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
+; SKYLAKE-NEXT: # kill: def $esi killed $esi def $rsi
+; SKYLAKE-NEXT: # kill: def $edi killed $edi def $rdi
; SKYLAKE-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $-1200, %eax # imm = 0xFB50
; SKYLAKE-NEXT: # sched: [1:0.25]
@@ -807,15 +807,15 @@
;
; BTVER2-LABEL: test_lea_add_scale_offset_big:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi
-; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
+; BTVER2-NEXT: # kill: def $esi killed $esi def $rsi
+; BTVER2-NEXT: # kill: def $edi killed $edi def $rdi
; BTVER2-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_scale_offset_big:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi
-; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
+; ZNVER1-NEXT: # kill: def $esi killed $esi def $rsi
+; ZNVER1-NEXT: # kill: def $edi killed $edi def $rdi
; ZNVER1-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i32 %1, 3
diff --git a/llvm/test/CodeGen/X86/leaFixup32.mir b/llvm/test/CodeGen/X86/leaFixup32.mir
index b7de88f..60767c7 100644
--- a/llvm/test/CodeGen/X86/leaFixup32.mir
+++ b/llvm/test/CodeGen/X86/leaFixup32.mir
@@ -85,8 +85,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%eax' }
- - { reg: '%ebp' }
+ - { reg: '$eax' }
+ - { reg: '$ebp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -103,12 +103,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %eax, %ebp
- ; CHECK: %eax = ADD32rr %eax, killed %ebp
- ; CHECK: %eax = ADD32ri8 %eax, -5
+ liveins: $eax, $ebp
+ ; CHECK: $eax = ADD32rr $eax, killed $ebp
+ ; CHECK: $eax = ADD32ri8 $eax, -5
- %eax = LEA32r killed %eax, 1, killed %ebp, -5, %noreg
- RETQ %eax
+ $eax = LEA32r killed $eax, 1, killed $ebp, -5, $noreg
+ RETQ $eax
...
---
@@ -120,8 +120,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%eax' }
- - { reg: '%ebp' }
+ - { reg: '$eax' }
+ - { reg: '$ebp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -138,12 +138,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %eax, %ebp
- ; CHECK: %ebp = ADD32rr %ebp, killed %eax
- ; CHECK: %ebp = ADD32ri8 %ebp, -5
+ liveins: $eax, $ebp
+ ; CHECK: $ebp = ADD32rr $ebp, killed $eax
+ ; CHECK: $ebp = ADD32ri8 $ebp, -5
- %ebp = LEA32r killed %ebp, 1, killed %eax, -5, %noreg
- RETQ %ebp
+ $ebp = LEA32r killed $ebp, 1, killed $eax, -5, $noreg
+ RETQ $ebp
...
---
@@ -155,8 +155,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%eax' }
- - { reg: '%ebp' }
+ - { reg: '$eax' }
+ - { reg: '$ebp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -173,11 +173,11 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %eax, %ebp
- ; CHECK: %ebp = ADD32rr %ebp, killed %eax
+ liveins: $eax, $ebp
+ ; CHECK: $ebp = ADD32rr $ebp, killed $eax
- %ebp = LEA32r killed %ebp, 1, killed %eax, 0, %noreg
- RETQ %ebp
+ $ebp = LEA32r killed $ebp, 1, killed $eax, 0, $noreg
+ RETQ $ebp
...
---
@@ -189,9 +189,9 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%eax' }
- - { reg: '%ebp' }
- - { reg: '%ebx' }
+ - { reg: '$eax' }
+ - { reg: '$ebp' }
+ - { reg: '$ebx' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -208,12 +208,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %eax, %ebp, %esi
- ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0
- ; CHECK: %ebx = ADD32ri8 %ebx, -5
+ liveins: $eax, $ebp, $esi
+ ; CHECK: $ebx = LEA32r killed $eax, 1, killed $ebp, 0
+ ; CHECK: $ebx = ADD32ri8 $ebx, -5
- %ebx = LEA32r killed %eax, 1, killed %ebp, -5, %noreg
- RETQ %ebx
+ $ebx = LEA32r killed $eax, 1, killed $ebp, -5, $noreg
+ RETQ $ebx
...
---
@@ -225,9 +225,9 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%eax' }
- - { reg: '%ebp' }
- - { reg: '%ebx' }
+ - { reg: '$eax' }
+ - { reg: '$ebp' }
+ - { reg: '$ebx' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -244,12 +244,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %eax, %ebp
- ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, %noreg
- ; CHECK: %ebx = ADD32ri8 %ebx, -5
+ liveins: $eax, $ebp
+ ; CHECK: $ebx = LEA32r killed $eax, 1, killed $ebp, 0, $noreg
+ ; CHECK: $ebx = ADD32ri8 $ebx, -5
- %ebx = LEA32r killed %ebp, 1, killed %eax, -5, %noreg
- RETQ %ebx
+ $ebx = LEA32r killed $ebp, 1, killed $eax, -5, $noreg
+ RETQ $ebx
...
---
@@ -261,9 +261,9 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%eax' }
- - { reg: '%ebp' }
- - { reg: '%ebx' }
+ - { reg: '$eax' }
+ - { reg: '$ebp' }
+ - { reg: '$ebx' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -280,11 +280,11 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %eax, %ebp
- ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, %noreg
+ liveins: $eax, $ebp
+ ; CHECK: $ebx = LEA32r killed $eax, 1, killed $ebp, 0, $noreg
- %ebx = LEA32r killed %ebp, 1, killed %eax, 0, %noreg
- RETQ %ebx
+ $ebx = LEA32r killed $ebp, 1, killed $eax, 0, $noreg
+ RETQ $ebx
...
---
@@ -296,8 +296,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%eax' }
- - { reg: '%ebp' }
+ - { reg: '$eax' }
+ - { reg: '$ebp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -314,12 +314,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %eax, %ebp
- ; CHECK: %eax = ADD32rr %eax, killed %ebp
- ; CHECK: %eax = ADD32ri %eax, 129
+ liveins: $eax, $ebp
+ ; CHECK: $eax = ADD32rr $eax, killed $ebp
+ ; CHECK: $eax = ADD32ri $eax, 129
- %eax = LEA32r killed %eax, 1, killed %ebp, 129, %noreg
- RETQ %eax
+ $eax = LEA32r killed $eax, 1, killed $ebp, 129, $noreg
+ RETQ $eax
...
---
@@ -331,9 +331,9 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%eax' }
- - { reg: '%ebx' }
- - { reg: '%ebp' }
+ - { reg: '$eax' }
+ - { reg: '$ebx' }
+ - { reg: '$ebp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -350,12 +350,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %eax, %ebp, %ebx
- ; CHECK: %ebx = MOV32rr %ebp
- ; CHECK: %ebx = ADD32rr %ebx, %ebp
+ liveins: $eax, $ebp, $ebx
+ ; CHECK: $ebx = MOV32rr $ebp
+ ; CHECK: $ebx = ADD32rr $ebx, $ebp
- %ebx = LEA32r killed %ebp, 1, %ebp, 0, %noreg
- RETQ %ebx
+ $ebx = LEA32r killed $ebp, 1, $ebp, 0, $noreg
+ RETQ $ebx
...
---
@@ -367,8 +367,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%ebx' }
- - { reg: '%ebp' }
+ - { reg: '$ebx' }
+ - { reg: '$ebp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -385,12 +385,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %eax, %ebp, %ebx
- ; CHECK: %ebx = LEA32r %noreg, 1, %ebp, 5, %noreg
- ; CHECK: %ebx = ADD32rr %ebx, %ebp
+ liveins: $eax, $ebp, $ebx
+ ; CHECK: $ebx = LEA32r $noreg, 1, $ebp, 5, $noreg
+ ; CHECK: $ebx = ADD32rr $ebx, $ebp
- %ebx = LEA32r %ebp, 1, %ebp, 5, %noreg
- RETQ %ebx
+ $ebx = LEA32r $ebp, 1, $ebp, 5, $noreg
+ RETQ $ebx
...
---
@@ -402,8 +402,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%ebx' }
- - { reg: '%ebp' }
+ - { reg: '$ebx' }
+ - { reg: '$ebp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -420,12 +420,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %eax, %ebp, %ebx
- ; CHECK: %ebx = LEA32r %noreg, 4, %ebp, 5, %noreg
- ; CHECK: %ebx = ADD32rr %ebx, %ebp
+ liveins: $eax, $ebp, $ebx
+ ; CHECK: $ebx = LEA32r $noreg, 4, $ebp, 5, $noreg
+ ; CHECK: $ebx = ADD32rr $ebx, $ebp
- %ebx = LEA32r %ebp, 4, %ebp, 5, %noreg
- RETQ %ebx
+ $ebx = LEA32r $ebp, 4, $ebp, 5, $noreg
+ RETQ $ebx
...
---
@@ -437,8 +437,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%ebx' }
- - { reg: '%ebp' }
+ - { reg: '$ebx' }
+ - { reg: '$ebp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -455,11 +455,11 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %eax, %ebp, %ebx
- ; CHECK: %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, %noreg
+ liveins: $eax, $ebp, $ebx
+ ; CHECK: $ebp = LEA32r killed $ebp, 4, killed $ebp, 0, $noreg
- %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, %noreg
- RETQ %ebp
+ $ebp = LEA32r killed $ebp, 4, killed $ebp, 0, $noreg
+ RETQ $ebp
...
---
@@ -471,8 +471,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%ebp' }
- - { reg: '%eax' }
+ - { reg: '$ebp' }
+ - { reg: '$eax' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -489,19 +489,19 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %eax, %ebp, %ebx
- ; CHECK: %ebx = LEA32r killed %eax, 4, killed %eax, 5, %noreg
- ; CHECK: %ebp = LEA32r killed %ebx, 4, killed %ebx, 0, %noreg
- ; CHECK: %ebp = ADD32ri8 %ebp, 5
+ liveins: $eax, $ebp, $ebx
+ ; CHECK: $ebx = LEA32r killed $eax, 4, killed $eax, 5, $noreg
+ ; CHECK: $ebp = LEA32r killed $ebx, 4, killed $ebx, 0, $noreg
+ ; CHECK: $ebp = ADD32ri8 $ebp, 5
- CMP32rr %eax, killed %ebx, implicit-def %eflags
- %ebx = LEA32r killed %eax, 4, killed %eax, 5, %noreg
- JE_1 %bb.1, implicit %eflags
- RETQ %ebx
+ CMP32rr $eax, killed $ebx, implicit-def $eflags
+ $ebx = LEA32r killed $eax, 4, killed $eax, 5, $noreg
+ JE_1 %bb.1, implicit $eflags
+ RETQ $ebx
bb.1:
- liveins: %eax, %ebp, %ebx
- %ebp = LEA32r killed %ebx, 4, killed %ebx, 5, %noreg
- RETQ %ebp
+ liveins: $eax, $ebp, $ebx
+ $ebp = LEA32r killed $ebx, 4, killed $ebx, 5, $noreg
+ RETQ $ebp
...
diff --git a/llvm/test/CodeGen/X86/leaFixup64.mir b/llvm/test/CodeGen/X86/leaFixup64.mir
index 48989df..136777d 100644
--- a/llvm/test/CodeGen/X86/leaFixup64.mir
+++ b/llvm/test/CodeGen/X86/leaFixup64.mir
@@ -158,8 +158,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
- - { reg: '%rbp' }
+ - { reg: '$rax' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -176,12 +176,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp
- ; CHECK: %eax = LEA64_32r killed %rax, 1, killed %rbp, 0
- ; CHECK: %eax = ADD32ri8 %eax, -5
+ liveins: $rax, $rbp
+ ; CHECK: $eax = LEA64_32r killed $rax, 1, killed $rbp, 0
+ ; CHECK: $eax = ADD32ri8 $eax, -5
- %eax = LEA64_32r killed %rax, 1, killed %rbp, -5, %noreg
- RETQ %eax
+ $eax = LEA64_32r killed $rax, 1, killed $rbp, -5, $noreg
+ RETQ $eax
...
---
@@ -193,8 +193,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
- - { reg: '%rbp' }
+ - { reg: '$rax' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -211,12 +211,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp
- ; CHECK: %ebp = LEA64_32r killed %rax, 1, killed %rbp, 0
- ; CHECK: %ebp = ADD32ri8 %ebp, -5
+ liveins: $rax, $rbp
+ ; CHECK: $ebp = LEA64_32r killed $rax, 1, killed $rbp, 0
+ ; CHECK: $ebp = ADD32ri8 $ebp, -5
- %ebp = LEA64_32r killed %rbp, 1, killed %rax, -5, %noreg
- RETQ %ebp
+ $ebp = LEA64_32r killed $rbp, 1, killed $rax, -5, $noreg
+ RETQ $ebp
...
---
@@ -228,8 +228,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
- - { reg: '%rbp' }
+ - { reg: '$rax' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -246,11 +246,11 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp
- ; CHECK: %ebp = LEA64_32r killed %rax, 1, killed %rbp, 0
+ liveins: $rax, $rbp
+ ; CHECK: $ebp = LEA64_32r killed $rax, 1, killed $rbp, 0
- %ebp = LEA64_32r killed %rbp, 1, killed %rax, 0, %noreg
- RETQ %ebp
+ $ebp = LEA64_32r killed $rbp, 1, killed $rax, 0, $noreg
+ RETQ $ebp
...
---
@@ -262,8 +262,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
- - { reg: '%rbp' }
+ - { reg: '$rax' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -280,12 +280,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp
- ; CHECK: %rax = ADD64rr %rax, killed %rbp
- ; CHECK: %rax = ADD64ri8 %rax, -5
+ liveins: $rax, $rbp
+ ; CHECK: $rax = ADD64rr $rax, killed $rbp
+ ; CHECK: $rax = ADD64ri8 $rax, -5
- %rax = LEA64r killed %rax, 1, killed %rbp, -5, %noreg
- RETQ %eax
+ $rax = LEA64r killed $rax, 1, killed $rbp, -5, $noreg
+ RETQ $eax
...
---
@@ -297,8 +297,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
- - { reg: '%rbp' }
+ - { reg: '$rax' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -315,12 +315,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp
- ; CHECK: %rbp = ADD64rr %rbp, killed %rax
- ; CHECK: %rbp = ADD64ri8 %rbp, -5
+ liveins: $rax, $rbp
+ ; CHECK: $rbp = ADD64rr $rbp, killed $rax
+ ; CHECK: $rbp = ADD64ri8 $rbp, -5
- %rbp = LEA64r killed %rbp, 1, killed %rax, -5, %noreg
- RETQ %ebp
+ $rbp = LEA64r killed $rbp, 1, killed $rax, -5, $noreg
+ RETQ $ebp
...
---
@@ -332,8 +332,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
- - { reg: '%rbp' }
+ - { reg: '$rax' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -350,11 +350,11 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp
- ; CHECK: %rbp = ADD64rr %rbp, killed %rax
+ liveins: $rax, $rbp
+ ; CHECK: $rbp = ADD64rr $rbp, killed $rax
- %rbp = LEA64r killed %rbp, 1, killed %rax, 0, %noreg
- RETQ %ebp
+ $rbp = LEA64r killed $rbp, 1, killed $rax, 0, $noreg
+ RETQ $ebp
...
---
@@ -366,9 +366,9 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
- - { reg: '%rbp' }
- - { reg: '%rbx' }
+ - { reg: '$rax' }
+ - { reg: '$rbp' }
+ - { reg: '$rbx' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -385,12 +385,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp
- ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, %noreg
- ; CHECK: %ebx = ADD32ri8 %ebx, -5
+ liveins: $rax, $rbp
+ ; CHECK: $ebx = LEA64_32r killed $rax, 1, killed $rbp, 0, $noreg
+ ; CHECK: $ebx = ADD32ri8 $ebx, -5
- %ebx = LEA64_32r killed %rax, 1, killed %rbp, -5, %noreg
- RETQ %ebx
+ $ebx = LEA64_32r killed $rax, 1, killed $rbp, -5, $noreg
+ RETQ $ebx
...
---
@@ -402,9 +402,9 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
- - { reg: '%rbp' }
- - { reg: '%rbx' }
+ - { reg: '$rax' }
+ - { reg: '$rbp' }
+ - { reg: '$rbx' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -421,12 +421,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp
- ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, %noreg
- ; CHECK: %ebx = ADD32ri8 %ebx, -5
+ liveins: $rax, $rbp
+ ; CHECK: $ebx = LEA64_32r killed $rax, 1, killed $rbp, 0, $noreg
+ ; CHECK: $ebx = ADD32ri8 $ebx, -5
- %ebx = LEA64_32r killed %rbp, 1, killed %rax, -5, %noreg
- RETQ %ebx
+ $ebx = LEA64_32r killed $rbp, 1, killed $rax, -5, $noreg
+ RETQ $ebx
...
---
@@ -438,9 +438,9 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
- - { reg: '%rbp' }
- - { reg: '%rbx' }
+ - { reg: '$rax' }
+ - { reg: '$rbp' }
+ - { reg: '$rbx' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -457,11 +457,11 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp
- ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, %noreg
+ liveins: $rax, $rbp
+ ; CHECK: $ebx = LEA64_32r killed $rax, 1, killed $rbp, 0, $noreg
- %ebx = LEA64_32r killed %rbp, 1, killed %rax, 0, %noreg
- RETQ %ebx
+ $ebx = LEA64_32r killed $rbp, 1, killed $rax, 0, $noreg
+ RETQ $ebx
...
---
@@ -473,9 +473,9 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
- - { reg: '%rbp' }
- - { reg: '%rbx' }
+ - { reg: '$rax' }
+ - { reg: '$rbp' }
+ - { reg: '$rbx' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -492,12 +492,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp
- ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, %noreg
- ; CHECK: %rbx = ADD64ri8 %rbx, -5
+ liveins: $rax, $rbp
+ ; CHECK: $rbx = LEA64r killed $rax, 1, killed $rbp, 0, $noreg
+ ; CHECK: $rbx = ADD64ri8 $rbx, -5
- %rbx = LEA64r killed %rax, 1, killed %rbp, -5, %noreg
- RETQ %ebx
+ $rbx = LEA64r killed $rax, 1, killed $rbp, -5, $noreg
+ RETQ $ebx
...
---
@@ -509,9 +509,9 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
- - { reg: '%rbp' }
- - { reg: '%rbx' }
+ - { reg: '$rax' }
+ - { reg: '$rbp' }
+ - { reg: '$rbx' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -528,12 +528,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp
- ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, %noreg
- ; CHECK: %rbx = ADD64ri8 %rbx, -5
+ liveins: $rax, $rbp
+ ; CHECK: $rbx = LEA64r killed $rax, 1, killed $rbp, 0, $noreg
+ ; CHECK: $rbx = ADD64ri8 $rbx, -5
- %rbx = LEA64r killed %rbp, 1, killed %rax, -5, %noreg
- RETQ %ebx
+ $rbx = LEA64r killed $rbp, 1, killed $rax, -5, $noreg
+ RETQ $ebx
...
---
@@ -545,9 +545,9 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
- - { reg: '%rbp' }
- - { reg: '%rbx' }
+ - { reg: '$rax' }
+ - { reg: '$rbp' }
+ - { reg: '$rbx' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -564,11 +564,11 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp
- ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, %noreg
+ liveins: $rax, $rbp
+ ; CHECK: $rbx = LEA64r killed $rax, 1, killed $rbp, 0, $noreg
- %rbx = LEA64r killed %rbp, 1, killed %rax, 0, %noreg
- RETQ %ebx
+ $rbx = LEA64r killed $rbp, 1, killed $rax, 0, $noreg
+ RETQ $ebx
...
---
@@ -580,8 +580,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rbp' }
+ - { reg: '$rdi' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -598,13 +598,13 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rdi, %rbp
- ; CHECK: %r12 = LEA64r %noreg, 2, killed %r13, 5, %noreg
- ; CHECK: %r12 = ADD64rr %r12, killed %rbp
- %rbp = KILL %rbp, implicit-def %rbp
- %r13 = KILL %rdi, implicit-def %r13
- %r12 = LEA64r killed %rbp, 2, killed %r13, 5, %noreg
- RETQ %r12
+ liveins: $rdi, $rbp
+ ; CHECK: $r12 = LEA64r $noreg, 2, killed $r13, 5, $noreg
+ ; CHECK: $r12 = ADD64rr $r12, killed $rbp
+ $rbp = KILL $rbp, implicit-def $rbp
+ $r13 = KILL $rdi, implicit-def $r13
+ $r12 = LEA64r killed $rbp, 2, killed $r13, 5, $noreg
+ RETQ $r12
...
---
@@ -616,8 +616,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
- - { reg: '%rbp' }
+ - { reg: '$rax' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -634,12 +634,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp
- ; CHECK: %eax = LEA64_32r killed %rax, 1, killed %rbp, 0
- ; CHECK: %eax = ADD32ri %eax, 129
+ liveins: $rax, $rbp
+ ; CHECK: $eax = LEA64_32r killed $rax, 1, killed $rbp, 0
+ ; CHECK: $eax = ADD32ri $eax, 129
- %eax = LEA64_32r killed %rax, 1, killed %rbp, 129, %noreg
- RETQ %eax
+ $eax = LEA64_32r killed $rax, 1, killed $rbp, 129, $noreg
+ RETQ $eax
...
---
@@ -651,8 +651,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
- - { reg: '%rbp' }
+ - { reg: '$rax' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -669,11 +669,11 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp, %rbx
- ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, %noreg
+ liveins: $rax, $rbp, $rbx
+ ; CHECK: $ebx = LEA64_32r killed $rbp, 1, killed $rbp, 0, $noreg
- %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, %noreg
- RETQ %ebx
+ $ebx = LEA64_32r killed $rbp, 1, killed $rbp, 0, $noreg
+ RETQ $ebx
...
---
@@ -685,8 +685,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rbx' }
- - { reg: '%rbp' }
+ - { reg: '$rbx' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -703,11 +703,11 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp, %rbx
- ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, %noreg
+ liveins: $rax, $rbp, $rbx
+ ; CHECK: $ebx = LEA64_32r killed $rbp, 1, killed $rbp, 5, $noreg
- %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, %noreg
- RETQ %ebx
+ $ebx = LEA64_32r killed $rbp, 1, killed $rbp, 5, $noreg
+ RETQ $ebx
...
---
@@ -719,8 +719,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rbx' }
- - { reg: '%rbp' }
+ - { reg: '$rbx' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -737,11 +737,11 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %eax, %ebp, %ebx
- ; CHECK: %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, %noreg
+ liveins: $eax, $ebp, $ebx
+ ; CHECK: $ebx = LEA64_32r killed $rbp, 4, killed $rbp, 5, $noreg
- %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, %noreg
- RETQ %ebx
+ $ebx = LEA64_32r killed $rbp, 4, killed $rbp, 5, $noreg
+ RETQ $ebx
...
---
@@ -753,8 +753,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
- - { reg: '%rbp' }
+ - { reg: '$rax' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -771,12 +771,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp
- ; CHECK: %rax = ADD64rr %rax, killed %rbp
- ; CHECK: %rax = ADD64ri32 %rax, 129
+ liveins: $rax, $rbp
+ ; CHECK: $rax = ADD64rr $rax, killed $rbp
+ ; CHECK: $rax = ADD64ri32 $rax, 129
- %rax = LEA64r killed %rax, 1, killed %rbp, 129, %noreg
- RETQ %eax
+ $rax = LEA64r killed $rax, 1, killed $rbp, 129, $noreg
+ RETQ $eax
...
---
@@ -788,8 +788,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rax' }
- - { reg: '%rbp' }
+ - { reg: '$rax' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -806,12 +806,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp, %rbx
- ; CHECK: %rbx = MOV64rr %rbp
- ; CHECK: %rbx = ADD64rr %rbx, %rbp
+ liveins: $rax, $rbp, $rbx
+ ; CHECK: $rbx = MOV64rr $rbp
+ ; CHECK: $rbx = ADD64rr $rbx, $rbp
- %rbx = LEA64r killed %rbp, 1, %rbp, 0, %noreg
- RETQ %ebx
+ $rbx = LEA64r killed $rbp, 1, $rbp, 0, $noreg
+ RETQ $ebx
...
---
@@ -823,8 +823,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rbx' }
- - { reg: '%rbp' }
+ - { reg: '$rbx' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -841,12 +841,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp, %rbx
- ; CHECK: %rbx = LEA64r %noreg, 1, %rbp, 5, %noreg
- ; CHECK: %rbx = ADD64rr %rbx, %rbp
+ liveins: $rax, $rbp, $rbx
+ ; CHECK: $rbx = LEA64r $noreg, 1, $rbp, 5, $noreg
+ ; CHECK: $rbx = ADD64rr $rbx, $rbp
- %rbx = LEA64r %rbp, 1, %rbp, 5, %noreg
- RETQ %ebx
+ $rbx = LEA64r $rbp, 1, $rbp, 5, $noreg
+ RETQ $ebx
...
---
@@ -858,8 +858,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rbx' }
- - { reg: '%rbp' }
+ - { reg: '$rbx' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -876,12 +876,12 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp, %rbx
- ; CHECK: %rbx = LEA64r %noreg, 4, %rbp, 5, %noreg
- ; CHECK: %rbx = ADD64rr %rbx, %rbp
+ liveins: $rax, $rbp, $rbx
+ ; CHECK: $rbx = LEA64r $noreg, 4, $rbp, 5, $noreg
+ ; CHECK: $rbx = ADD64rr $rbx, $rbp
- %rbx = LEA64r %rbp, 4, %rbp, 5, %noreg
- RETQ %ebx
+ $rbx = LEA64r $rbp, 4, $rbp, 5, $noreg
+ RETQ $ebx
...
---
@@ -893,8 +893,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rbx' }
- - { reg: '%rbp' }
+ - { reg: '$rbx' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -911,11 +911,11 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp, %rbx
- ; CHECK: %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, %noreg
+ liveins: $rax, $rbp, $rbx
+ ; CHECK: $rbp = LEA64r killed $rbp, 4, killed $rbp, 0, $noreg
- %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, %noreg
- RETQ %ebp
+ $rbp = LEA64r killed $rbp, 4, killed $rbp, 0, $noreg
+ RETQ $ebp
...
---
@@ -927,8 +927,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rbp' }
- - { reg: '%rax' }
+ - { reg: '$rbp' }
+ - { reg: '$rax' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -945,19 +945,19 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp, %rbx
- ; CHECK: %rbx = LEA64r killed %rax, 4, killed %rax, 5, %noreg
- ; CHECK: %rbp = LEA64r killed %rbx, 4, killed %rbx, 0, %noreg
- ; CHECK: %rbp = ADD64ri8 %rbp, 5
+ liveins: $rax, $rbp, $rbx
+ ; CHECK: $rbx = LEA64r killed $rax, 4, killed $rax, 5, $noreg
+ ; CHECK: $rbp = LEA64r killed $rbx, 4, killed $rbx, 0, $noreg
+ ; CHECK: $rbp = ADD64ri8 $rbp, 5
- CMP64rr %rax, killed %rbx, implicit-def %eflags
- %rbx = LEA64r killed %rax, 4, killed %rax, 5, %noreg
- JE_1 %bb.1, implicit %eflags
- RETQ %ebx
+ CMP64rr $rax, killed $rbx, implicit-def $eflags
+ $rbx = LEA64r killed $rax, 4, killed $rax, 5, $noreg
+ JE_1 %bb.1, implicit $eflags
+ RETQ $ebx
bb.1:
- liveins: %rax, %rbp, %rbx
- %rbp = LEA64r killed %rbx, 4, killed %rbx, 5, %noreg
- RETQ %ebp
+ liveins: $rax, $rbp, $rbx
+ $rbp = LEA64r killed $rbx, 4, killed $rbx, 5, $noreg
+ RETQ $ebp
...
---
@@ -969,8 +969,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rbx' }
- - { reg: '%rbp' }
+ - { reg: '$rbx' }
+ - { reg: '$rbp' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -987,11 +987,11 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp, %rbx
- ; CHECK: %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, %noreg
+ liveins: $rax, $rbp, $rbx
+ ; CHECK: $ebp = LEA64_32r killed $rbp, 4, killed $rbp, 0, $noreg
- %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, %noreg
- RETQ %ebp
+ $ebp = LEA64_32r killed $rbp, 4, killed $rbp, 0, $noreg
+ RETQ $ebp
...
---
@@ -1003,8 +1003,8 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%rbp' }
- - { reg: '%rax' }
+ - { reg: '$rbp' }
+ - { reg: '$rax' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1021,19 +1021,19 @@
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
- liveins: %rax, %rbp, %rbx
- ; CHECK: %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, %noreg
- ; CHECK: %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 0, %noreg
- ; CHECK: %ebp = ADD32ri8 %ebp, 5
+ liveins: $rax, $rbp, $rbx
+ ; CHECK: $ebx = LEA64_32r killed $rax, 4, killed $rax, 5, $noreg
+ ; CHECK: $ebp = LEA64_32r killed $rbx, 4, killed $rbx, 0, $noreg
+ ; CHECK: $ebp = ADD32ri8 $ebp, 5
- CMP64rr %rax, killed %rbx, implicit-def %eflags
- %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, %noreg
- JE_1 %bb.1, implicit %eflags
- RETQ %ebx
+ CMP64rr $rax, killed $rbx, implicit-def $eflags
+ $ebx = LEA64_32r killed $rax, 4, killed $rax, 5, $noreg
+ JE_1 %bb.1, implicit $eflags
+ RETQ $ebx
bb.1:
- liveins: %rax, %rbp, %rbx
- %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 5, %noreg
- RETQ %ebp
+ liveins: $rax, $rbp, $rbx
+ $ebp = LEA64_32r killed $rbx, 4, killed $rbx, 5, $noreg
+ RETQ $ebp
...
diff --git a/llvm/test/CodeGen/X86/loop-search.ll b/llvm/test/CodeGen/X86/loop-search.ll
index 88e9963..54bc3c2 100644
--- a/llvm/test/CodeGen/X86/loop-search.ll
+++ b/llvm/test/CodeGen/X86/loop-search.ll
@@ -25,15 +25,15 @@
; ### FIXME: %bb.3 and LBB0_1 should be merged
; CHECK-NEXT: ## %bb.3:
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
; CHECK-NEXT: LBB0_1:
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
; CHECK-NEXT: LBB0_6:
; CHECK-NEXT: movb $1, %al
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
entry:
%cmp5 = icmp sgt i32 %count, 0
diff --git a/llvm/test/CodeGen/X86/lzcnt-schedule.ll b/llvm/test/CodeGen/X86/lzcnt-schedule.ll
index 43cb146..3420e85 100644
--- a/llvm/test/CodeGen/X86/lzcnt-schedule.ll
+++ b/llvm/test/CodeGen/X86/lzcnt-schedule.ll
@@ -13,7 +13,7 @@
; GENERIC-NEXT: lzcntw (%rsi), %cx # sched: [7:1.00]
; GENERIC-NEXT: lzcntw %di, %ax # sched: [3:1.00]
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ctlz_i16:
@@ -21,7 +21,7 @@
; HASWELL-NEXT: lzcntw (%rsi), %cx # sched: [8:1.00]
; HASWELL-NEXT: lzcntw %di, %ax # sched: [3:1.00]
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; HASWELL-NEXT: # kill: def $ax killed $ax killed $eax
; HASWELL-NEXT: retq # sched: [7:1.00]
;
; BROADWELL-LABEL: test_ctlz_i16:
@@ -29,7 +29,7 @@
; BROADWELL-NEXT: lzcntw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: lzcntw %di, %ax # sched: [3:1.00]
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; BROADWELL-NEXT: # kill: def $ax killed $ax killed $eax
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_ctlz_i16:
@@ -37,7 +37,7 @@
; SKYLAKE-NEXT: lzcntw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: lzcntw %di, %ax # sched: [3:1.00]
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
+; SKYLAKE-NEXT: # kill: def $ax killed $ax killed $eax
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_ctlz_i16:
@@ -45,7 +45,7 @@
; BTVER2-NEXT: lzcntw (%rsi), %cx # sched: [6:1.00]
; BTVER2-NEXT: lzcntw %di, %ax # sched: [3:1.00]
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
+; BTVER2-NEXT: # kill: def $ax killed $ax killed $eax
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ctlz_i16:
@@ -53,7 +53,7 @@
; ZNVER1-NEXT: lzcntw (%rsi), %cx # sched: [6:0.50]
; ZNVER1-NEXT: lzcntw %di, %ax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
+; ZNVER1-NEXT: # kill: def $ax killed $ax killed $eax
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a1
%2 = tail call i16 @llvm.ctlz.i16( i16 %1, i1 false )
diff --git a/llvm/test/CodeGen/X86/lzcnt-zext-cmp.ll b/llvm/test/CodeGen/X86/lzcnt-zext-cmp.ll
index 9a31a8d..90abd2d 100644
--- a/llvm/test/CodeGen/X86/lzcnt-zext-cmp.ll
+++ b/llvm/test/CodeGen/X86/lzcnt-zext-cmp.ll
@@ -84,7 +84,7 @@
; ALL-NEXT: sete %cl
; ALL-NEXT: orb %al, %cl
; ALL-NEXT: movzbl %cl, %eax
-; ALL-NEXT: # kill: def %ax killed %ax killed %eax
+; ALL-NEXT: # kill: def $ax killed $ax killed $eax
; ALL-NEXT: retq
%cmp = icmp eq i16 %a, 0
%cmp1 = icmp eq i16 %b, 0
@@ -128,7 +128,7 @@
; FASTLZCNT-NEXT: lzcntq %rsi, %rax
; FASTLZCNT-NEXT: orl %ecx, %eax
; FASTLZCNT-NEXT: shrl $6, %eax
-; FASTLZCNT-NEXT: # kill: def %eax killed %eax killed %rax
+; FASTLZCNT-NEXT: # kill: def $eax killed $eax killed $rax
; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp5:
@@ -267,7 +267,7 @@
; FASTLZCNT-NEXT: shrl $5, %ecx
; FASTLZCNT-NEXT: shrl $6, %eax
; FASTLZCNT-NEXT: orl %ecx, %eax
-; FASTLZCNT-NEXT: # kill: def %eax killed %eax killed %rax
+; FASTLZCNT-NEXT: # kill: def $eax killed $eax killed $rax
; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp9:
diff --git a/llvm/test/CodeGen/X86/machine-combiner-int.ll b/llvm/test/CodeGen/X86/machine-combiner-int.ll
index 07282c2..ba1a564 100644
--- a/llvm/test/CodeGen/X86/machine-combiner-int.ll
+++ b/llvm/test/CodeGen/X86/machine-combiner-int.ll
@@ -34,8 +34,8 @@
; CHECK-NEXT: retq
; DEAD: ADD32rr
-; DEAD-NEXT: IMUL32rr{{.*}}implicit-def dead %eflags
-; DEAD-NEXT: IMUL32rr{{.*}}implicit-def dead %eflags
+; DEAD-NEXT: IMUL32rr{{.*}}implicit-def dead $eflags
+; DEAD-NEXT: IMUL32rr{{.*}}implicit-def dead $eflags
%t0 = add i32 %x0, %x1
%t1 = mul i32 %x2, %t0
diff --git a/llvm/test/CodeGen/X86/machine-copy-prop.mir b/llvm/test/CodeGen/X86/machine-copy-prop.mir
index 0545458..cf6ce3d 100644
--- a/llvm/test/CodeGen/X86/machine-copy-prop.mir
+++ b/llvm/test/CodeGen/X86/machine-copy-prop.mir
@@ -20,196 +20,196 @@
# the kill flag of intermediate instructions.
# CHECK-LABEL: name: copyprop_remove_kill0
# CHECK: bb.0:
-# CHECK-NEXT: %rax = COPY %rdi
-# CHECK-NEXT: NOOP implicit %rdi
+# CHECK-NEXT: $rax = COPY $rdi
+# CHECK-NEXT: NOOP implicit $rdi
# CHECK-NOT: COPY
-# CHECK-NEXT: NOOP implicit %rax, implicit %rdi
+# CHECK-NEXT: NOOP implicit $rax, implicit $rdi
name: copyprop_remove_kill0
body: |
bb.0:
- %rax = COPY %rdi
- NOOP implicit killed %rdi
- %rdi = COPY %rax
- NOOP implicit %rax, implicit %rdi
+ $rax = COPY $rdi
+ NOOP implicit killed $rdi
+ $rdi = COPY $rax
+ NOOP implicit $rax, implicit $rdi
...
---
# The second copy is redundant and will be removed, check that we also remove
# the kill flag of intermediate instructions.
# CHECK-LABEL: name: copyprop_remove_kill1
# CHECK: bb.0:
-# CHECK-NEXT: %rax = COPY %rdi
-# CHECK-NEXT: NOOP implicit %edi
+# CHECK-NEXT: $rax = COPY $rdi
+# CHECK-NEXT: NOOP implicit $edi
# CHECK-NOT: COPY
-# CHECK-NEXT: NOOP implicit %rax, implicit %rdi
+# CHECK-NEXT: NOOP implicit $rax, implicit $rdi
name: copyprop_remove_kill1
body: |
bb.0:
- %rax = COPY %rdi
- NOOP implicit killed %edi
- %rdi = COPY %rax
- NOOP implicit %rax, implicit %rdi
+ $rax = COPY $rdi
+ NOOP implicit killed $edi
+ $rdi = COPY $rax
+ NOOP implicit $rax, implicit $rdi
...
---
# The second copy is redundant and will be removed, check that we also remove
# the kill flag of intermediate instructions.
# CHECK-LABEL: name: copyprop_remove_kill2
# CHECK: bb.0:
-# CHECK-NEXT: %ax = COPY %di
-# CHECK-NEXT: NOOP implicit %rdi
+# CHECK-NEXT: $ax = COPY $di
+# CHECK-NEXT: NOOP implicit $rdi
# CHECK-NOT: COPY
-# CHECK-NEXT: NOOP implicit %rax, implicit %rdi
+# CHECK-NEXT: NOOP implicit $rax, implicit $rdi
name: copyprop_remove_kill2
body: |
bb.0:
- %ax = COPY %di
- NOOP implicit killed %rdi
- %di = COPY %ax
- NOOP implicit %rax, implicit %rdi
+ $ax = COPY $di
+ NOOP implicit killed $rdi
+ $di = COPY $ax
+ NOOP implicit $rax, implicit $rdi
...
---
# The second copy is redundant; the call preserves the source and dest register.
# CHECK-LABEL: name: copyprop0
# CHECK: bb.0:
-# CHECK-NEXT: %rax = COPY %rdi
+# CHECK-NEXT: $rax = COPY $rdi
# CHECK-NEXT: CALL64pcrel32 @foo, csr_64_rt_mostregs
-# CHECK-NEXT: NOOP implicit %edi
+# CHECK-NEXT: NOOP implicit $edi
# CHECK-NOT: COPY
-# CHECK-NEXT: NOOP implicit %rax, implicit %rdi
+# CHECK-NEXT: NOOP implicit $rax, implicit $rdi
name: copyprop0
body: |
bb.0:
- %rax = COPY %rdi
+ $rax = COPY $rdi
CALL64pcrel32 @foo, csr_64_rt_mostregs
- NOOP implicit killed %edi
- %rdi = COPY %rax
- NOOP implicit %rax, implicit %rdi
+ NOOP implicit killed $edi
+ $rdi = COPY $rax
+ NOOP implicit $rax, implicit $rdi
...
---
# The 2nd copy is redundant; the call preserves the source and dest register.
# CHECK-LABEL: name: copyprop1
# CHECK: bb.0:
-# CHECK-NEXT: %rax = COPY %rdi
-# CHECK-NEXT: NOOP implicit %rax
-# CHECK-NEXT: NOOP implicit %rax, implicit %rdi
+# CHECK-NEXT: $rax = COPY $rdi
+# CHECK-NEXT: NOOP implicit $rax
+# CHECK-NEXT: NOOP implicit $rax, implicit $rdi
name: copyprop1
body: |
bb.0:
- %rax = COPY %rdi
- NOOP implicit killed %rax
- %rax = COPY %rdi
- NOOP implicit %rax, implicit %rdi
+ $rax = COPY $rdi
+ NOOP implicit killed $rax
+ $rax = COPY $rdi
+ NOOP implicit $rax, implicit $rdi
...
---
# CHECK-LABEL: name: copyprop2
# CHECK: bb.0:
-# CHECK-NEXT: %rax = COPY %rdi
-# CHECK-NEXT: NOOP implicit %ax
+# CHECK-NEXT: $rax = COPY $rdi
+# CHECK-NEXT: NOOP implicit $ax
# CHECK-NEXT: CALL64pcrel32 @foo, csr_64_rt_mostregs
-# CHECK-NOT: %rax = COPY %rdi
-# CHECK-NEXT: NOOP implicit %rax, implicit %rdi
+# CHECK-NOT: $rax = COPY $rdi
+# CHECK-NEXT: NOOP implicit $rax, implicit $rdi
name: copyprop2
body: |
bb.0:
- %rax = COPY %rdi
- NOOP implicit killed %ax
+ $rax = COPY $rdi
+ NOOP implicit killed $ax
CALL64pcrel32 @foo, csr_64_rt_mostregs
- %rax = COPY %rdi
- NOOP implicit %rax, implicit %rdi
+ $rax = COPY $rdi
+ NOOP implicit $rax, implicit $rdi
...
---
-# The second copy is not redundant if the source register (%rax) is clobbered
-# even if the dest (%rbp) is not.
+# The second copy is not redundant if the source register ($rax) is clobbered
+# even if the dest ($rbp) is not.
# CHECK-LABEL: name: nocopyprop0
# CHECK: bb.0:
-# CHECK-NEXT: %rax = COPY %rbp
-# CHECK-NEXT: CALL64pcrel32 @foo, csr_64, implicit %rax, implicit %rbp
-# CHECK-NEXT: %rbp = COPY %rax
-# CHECK-NEXT: NOOP implicit %rax, implicit %rbp
+# CHECK-NEXT: $rax = COPY $rbp
+# CHECK-NEXT: CALL64pcrel32 @foo, csr_64, implicit $rax, implicit $rbp
+# CHECK-NEXT: $rbp = COPY $rax
+# CHECK-NEXT: NOOP implicit $rax, implicit $rbp
name: nocopyprop0
body: |
bb.0:
- %rax = COPY %rbp
- CALL64pcrel32 @foo, csr_64, implicit %rax, implicit %rbp
- %rbp = COPY %rax
- NOOP implicit %rax, implicit %rbp
+ $rax = COPY $rbp
+ CALL64pcrel32 @foo, csr_64, implicit $rax, implicit $rbp
+ $rbp = COPY $rax
+ NOOP implicit $rax, implicit $rbp
...
---
-# The second copy is not redundant if the dest register (%rax) is clobbered
-# even if the source (%rbp) is not.
+# The second copy is not redundant if the dest register ($rax) is clobbered
+# even if the source ($rbp) is not.
# CHECK-LABEL: name: nocopyprop1
# CHECK: bb.0:
-# CHECK-NEXT: %rbp = COPY %rax
-# CHECK-NEXT: CALL64pcrel32 @foo, csr_64, implicit %rax, implicit %rbp
-# CHECK-NEXT: %rax = COPY %rbp
-# CHECK-NEXT: NOOP implicit %rax, implicit %rbp
+# CHECK-NEXT: $rbp = COPY $rax
+# CHECK-NEXT: CALL64pcrel32 @foo, csr_64, implicit $rax, implicit $rbp
+# CHECK-NEXT: $rax = COPY $rbp
+# CHECK-NEXT: NOOP implicit $rax, implicit $rbp
name: nocopyprop1
body: |
bb.0:
- %rbp = COPY %rax
- CALL64pcrel32 @foo, csr_64, implicit %rax, implicit %rbp
- %rax = COPY %rbp
- NOOP implicit %rax, implicit %rbp
+ $rbp = COPY $rax
+ CALL64pcrel32 @foo, csr_64, implicit $rax, implicit $rbp
+ $rax = COPY $rbp
+ NOOP implicit $rax, implicit $rbp
...
---
-# The second copy is not redundant if the source register (%rax) is clobbered
-# even if the dest (%rbp) is not.
+# The second copy is not redundant if the source register ($rax) is clobbered
+# even if the dest ($rbp) is not.
# CHECK-LABEL: name: nocopyprop2
# CHECK: bb.0:
-# CHECK-NEXT: %rax = COPY %rbp
-# CHECK-NEXT: CALL64pcrel32 @foo, csr_64, implicit %rax, implicit %rbp
-# CHECK-NEXT: %rax = COPY %rbp
-# CHECK-NEXT: NOOP implicit %rax, implicit %rbp
+# CHECK-NEXT: $rax = COPY $rbp
+# CHECK-NEXT: CALL64pcrel32 @foo, csr_64, implicit $rax, implicit $rbp
+# CHECK-NEXT: $rax = COPY $rbp
+# CHECK-NEXT: NOOP implicit $rax, implicit $rbp
name: nocopyprop2
body: |
bb.0:
- %rax = COPY %rbp
- CALL64pcrel32 @foo, csr_64, implicit %rax, implicit %rbp
- %rax = COPY %rbp
- NOOP implicit %rax, implicit %rbp
+ $rax = COPY $rbp
+ CALL64pcrel32 @foo, csr_64, implicit $rax, implicit $rbp
+ $rax = COPY $rbp
+ NOOP implicit $rax, implicit $rbp
...
---
-# The second copy is not redundant if the dest register (%rax) is clobbered
-# even if the source (%rbp) is not.
+# The second copy is not redundant if the dest register ($rax) is clobbered
+# even if the source ($rbp) is not.
# CHECK-LABEL: name: nocopyprop3
# CHECK: bb.0:
-# CHECK-NEXT: %rbp = COPY %rax
-# CHECK-NEXT: CALL64pcrel32 @foo, csr_64, implicit %rax, implicit %rbp
-# CHECK-NEXT: %rbp = COPY %rax
-# CHECK-NEXT: NOOP implicit %rax, implicit %rbp
+# CHECK-NEXT: $rbp = COPY $rax
+# CHECK-NEXT: CALL64pcrel32 @foo, csr_64, implicit $rax, implicit $rbp
+# CHECK-NEXT: $rbp = COPY $rax
+# CHECK-NEXT: NOOP implicit $rax, implicit $rbp
name: nocopyprop3
body: |
bb.0:
- %rbp = COPY %rax
- CALL64pcrel32 @foo, csr_64, implicit %rax, implicit %rbp
- %rbp = COPY %rax
- NOOP implicit %rax, implicit %rbp
+ $rbp = COPY $rax
+ CALL64pcrel32 @foo, csr_64, implicit $rax, implicit $rbp
+ $rbp = COPY $rax
+ NOOP implicit $rax, implicit $rbp
...
---
# A reserved register may change its value so the 2nd copy is not redundant.
# CHECK-LABEL: name: nocopyprop4
# CHECK: bb.0:
-# CHECK-NEXT: %rax = COPY %rip
-# CHECK-NEXT: NOOP implicit %rax
-# CHECK-NEXT: %rax = COPY %rip
-# CHECK-NEXT: NOOP implicit %rax
+# CHECK-NEXT: $rax = COPY $rip
+# CHECK-NEXT: NOOP implicit $rax
+# CHECK-NEXT: $rax = COPY $rip
+# CHECK-NEXT: NOOP implicit $rax
name: nocopyprop4
body: |
bb.0:
- %rax = COPY %rip
- NOOP implicit %rax
- %rax = COPY %rip
- NOOP implicit %rax
+ $rax = COPY $rip
+ NOOP implicit $rax
+ $rax = COPY $rip
+ NOOP implicit $rax
...
---
# Writing to a reserved register may have additional effects (slightly illegal
-# testcase because writing to %rip like this should make the instruction a jump)
+# testcase because writing to $rip like this should make the instruction a jump)
# CHECK-LABEL: name: nocopyprop5
# CHECK: bb.0:
-# CHECK-NEXT: %rip = COPY %rax
-# CHECK-NEXT: %rip = COPY %rax
+# CHECK-NEXT: $rip = COPY $rax
+# CHECK-NEXT: $rip = COPY $rax
name: nocopyprop5
body: |
bb.0:
- %rip = COPY %rax
- %rip = COPY %rax
+ $rip = COPY $rax
+ $rip = COPY $rax
...
diff --git a/llvm/test/CodeGen/X86/machine-cse.ll b/llvm/test/CodeGen/X86/machine-cse.ll
index 0e33238..ec831cb 100644
--- a/llvm/test/CodeGen/X86/machine-cse.ll
+++ b/llvm/test/CodeGen/X86/machine-cse.ll
@@ -50,8 +50,8 @@
define void @commute(i32 %test_case, i32 %scale) nounwind ssp {
; CHECK-LABEL: commute:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: cmpl $2, %eax
; CHECK-NEXT: ja .LBB1_4
@@ -64,7 +64,7 @@
; CHECK-NEXT: imull %edi, %esi
; CHECK-NEXT: leal (%rsi,%rsi,2), %esi
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: # kill: def %edi killed %edi killed %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi killed $rdi
; CHECK-NEXT: callq printf
; CHECK-NEXT: addq $8, %rsp
; CHECK-NEXT: .p2align 4, 0x90
diff --git a/llvm/test/CodeGen/X86/machine-region-info.mir b/llvm/test/CodeGen/X86/machine-region-info.mir
index 7704cb2..e831823 100644
--- a/llvm/test/CodeGen/X86/machine-region-info.mir
+++ b/llvm/test/CodeGen/X86/machine-region-info.mir
@@ -4,45 +4,45 @@
name: fun
body: |
bb.0:
- CMP32ri8 %edi, 40, implicit-def %eflags
- JNE_1 %bb.7, implicit killed %eflags
+ CMP32ri8 $edi, 40, implicit-def $eflags
+ JNE_1 %bb.7, implicit killed $eflags
JMP_1 %bb.1
bb.1:
- CMP32ri8 %edi, 1, implicit-def %eflags
- JNE_1 %bb.11, implicit killed %eflags
+ CMP32ri8 $edi, 1, implicit-def $eflags
+ JNE_1 %bb.11, implicit killed $eflags
JMP_1 %bb.2
bb.2:
- CMP32ri8 %edi, 2, implicit-def %eflags
- JNE_1 %bb.5, implicit killed %eflags
+ CMP32ri8 $edi, 2, implicit-def $eflags
+ JNE_1 %bb.5, implicit killed $eflags
JMP_1 %bb.3
bb.3:
- CMP32ri8 %edi, 90, implicit-def %eflags
- JNE_1 %bb.5, implicit killed %eflags
+ CMP32ri8 $edi, 90, implicit-def $eflags
+ JNE_1 %bb.5, implicit killed $eflags
JMP_1 %bb.4
bb.4:
bb.5:
- CMP32ri8 %edi, 4, implicit-def %eflags
- JNE_1 %bb.11, implicit killed %eflags
+ CMP32ri8 $edi, 4, implicit-def $eflags
+ JNE_1 %bb.11, implicit killed $eflags
JMP_1 %bb.6
bb.6:
JMP_1 %bb.11
bb.7:
- CMP32ri8 %edi, 5, implicit-def %eflags
- JE_1 %bb.9, implicit killed %eflags
+ CMP32ri8 $edi, 5, implicit-def $eflags
+ JE_1 %bb.9, implicit killed $eflags
JMP_1 %bb.8
bb.8:
bb.9:
- CMP32ri8 %edi, 6, implicit-def %eflags
- JE_1 %bb.11, implicit killed %eflags
+ CMP32ri8 $edi, 6, implicit-def $eflags
+ JE_1 %bb.11, implicit killed $eflags
JMP_1 %bb.10
bb.10:
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index 574f271..3acb987 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -299,8 +299,8 @@
;
; KNL_32-LABEL: test6:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; KNL_32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL_32-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; KNL_32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL_32-NEXT: movw $255, %ax
; KNL_32-NEXT: kmovw %eax, %k1
; KNL_32-NEXT: kmovw %k1, %k2
@@ -337,7 +337,7 @@
;
; KNL_64-LABEL: test7:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL_64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL_64-NEXT: kmovw %esi, %k0
; KNL_64-NEXT: kshiftlw $8, %k0, %k0
; KNL_64-NEXT: kshiftrw $8, %k0, %k1
@@ -350,7 +350,7 @@
;
; KNL_32-LABEL: test7:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL_32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; KNL_32-NEXT: kmovw %ecx, %k0
@@ -496,7 +496,7 @@
; KNL_32-NEXT: movw $255, %ax
; KNL_32-NEXT: kmovw %eax, %k1
; KNL_32-NEXT: vpgatherdd (,%zmm1), %zmm0 {%k1}
-; KNL_32-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL_32-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL_32-NEXT: retl
;
; SKX_SMALL-LABEL: test9:
@@ -582,7 +582,7 @@
; KNL_32-NEXT: movw $255, %ax
; KNL_32-NEXT: kmovw %eax, %k1
; KNL_32-NEXT: vpgatherdd (,%zmm1), %zmm0 {%k1}
-; KNL_32-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL_32-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL_32-NEXT: retl
;
; SKX_SMALL-LABEL: test10:
@@ -819,7 +819,7 @@
define <4 x float> @test15(float* %base, <4 x i32> %ind, <4 x i1> %mask) {
; KNL_64-LABEL: test15:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL_64-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_64-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL_64-NEXT: kshiftlw $12, %k0, %k0
@@ -831,7 +831,7 @@
;
; KNL_32-LABEL: test15:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_32-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL_32-NEXT: kshiftlw $12, %k0, %k0
@@ -869,8 +869,8 @@
define <4 x double> @test16(double* %base, <4 x i32> %ind, <4 x i1> %mask, <4 x double> %src0) {
; KNL_64-LABEL: test16:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; KNL_64-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; KNL_64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_64-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL_64-NEXT: kshiftlw $12, %k0, %k0
@@ -881,8 +881,8 @@
;
; KNL_32-LABEL: test16:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; KNL_32-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_32-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL_32-NEXT: kshiftlw $12, %k0, %k0
@@ -918,7 +918,7 @@
define <2 x double> @test17(double* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x double> %src0) {
; KNL_64-LABEL: test17:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
+; KNL_64-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
; KNL_64-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_64-NEXT: vpsraq $32, %zmm0, %zmm0
; KNL_64-NEXT: vpsllq $63, %xmm1, %xmm1
@@ -932,7 +932,7 @@
;
; KNL_32-LABEL: test17:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
+; KNL_32-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
; KNL_32-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_32-NEXT: vpsraq $32, %zmm0, %zmm0
; KNL_32-NEXT: vpsllq $63, %xmm1, %xmm1
@@ -981,8 +981,8 @@
define void @test18(<4 x i32>%a1, <4 x i32*> %ptr, <4 x i1>%mask) {
; KNL_64-LABEL: test18:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; KNL_64-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; KNL_64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; KNL_64-NEXT: vpslld $31, %xmm2, %xmm2
; KNL_64-NEXT: vptestmd %zmm2, %zmm2, %k0
; KNL_64-NEXT: kshiftlw $12, %k0, %k0
@@ -993,8 +993,8 @@
;
; KNL_32-LABEL: test18:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL_32-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL_32-NEXT: vpslld $31, %xmm2, %xmm2
; KNL_32-NEXT: vptestmd %zmm2, %zmm2, %k0
; KNL_32-NEXT: kshiftlw $12, %k0, %k0
@@ -1024,8 +1024,8 @@
define void @test19(<4 x double>%a1, double* %ptr, <4 x i1>%mask, <4 x i64> %ind) {
; KNL_64-LABEL: test19:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; KNL_64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL_64-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; KNL_64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_64-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL_64-NEXT: kshiftlw $12, %k0, %k0
@@ -1036,8 +1036,8 @@
;
; KNL_32-LABEL: test19:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; KNL_32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL_32-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; KNL_32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_32-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL_32-NEXT: kshiftlw $12, %k0, %k0
@@ -1072,8 +1072,8 @@
define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
; KNL_64-LABEL: test20:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; KNL_64-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL_64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; KNL_64-NEXT: vpsllq $63, %xmm2, %xmm2
; KNL_64-NEXT: vptestmq %zmm2, %zmm2, %k0
; KNL_64-NEXT: kshiftlw $14, %k0, %k0
@@ -1084,7 +1084,7 @@
;
; KNL_32-LABEL: test20:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL_32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,2,2,3]
; KNL_32-NEXT: vpsllq $63, %xmm2, %xmm2
; KNL_32-NEXT: vptestmq %zmm2, %zmm2, %k0
@@ -1116,7 +1116,7 @@
define void @test21(<2 x i32>%a1, <2 x i32*> %ptr, <2 x i1>%mask) {
; KNL_64-LABEL: test21:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; KNL_64-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; KNL_64-NEXT: vpsllq $63, %xmm2, %xmm2
; KNL_64-NEXT: vptestmq %zmm2, %zmm2, %k0
; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1164,7 +1164,7 @@
define <2 x float> @test22(float* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x float> %src0) {
; KNL_64-LABEL: test22:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
+; KNL_64-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
; KNL_64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_64-NEXT: vpsllq $63, %xmm1, %xmm1
; KNL_64-NEXT: vptestmq %zmm1, %zmm1, %k0
@@ -1177,7 +1177,7 @@
;
; KNL_32-LABEL: test22:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
+; KNL_32-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
; KNL_32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_32-NEXT: vpsllq $63, %xmm1, %xmm1
; KNL_32-NEXT: vptestmq %zmm1, %zmm1, %k0
@@ -1216,8 +1216,8 @@
define <2 x float> @test22a(float* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x float> %src0) {
; KNL_64-LABEL: test22a:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %xmm2 killed %xmm2 def %ymm2
-; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL_64-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; KNL_64-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL_64-NEXT: vpsllq $63, %xmm1, %xmm1
; KNL_64-NEXT: vptestmq %zmm1, %zmm1, %k0
; KNL_64-NEXT: kshiftlw $14, %k0, %k0
@@ -1229,8 +1229,8 @@
;
; KNL_32-LABEL: test22a:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: def %xmm2 killed %xmm2 def %ymm2
-; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL_32-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL_32-NEXT: vpsllq $63, %xmm1, %xmm1
; KNL_32-NEXT: vptestmq %zmm1, %zmm1, %k0
; KNL_32-NEXT: kshiftlw $14, %k0, %k0
@@ -1322,7 +1322,7 @@
define <2 x i32> @test23b(i32* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x i32> %src0) {
; KNL_64-LABEL: test23b:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL_64-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL_64-NEXT: vpsllq $63, %xmm1, %xmm1
; KNL_64-NEXT: vptestmq %zmm1, %zmm1, %k0
; KNL_64-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
@@ -1335,7 +1335,7 @@
;
; KNL_32-LABEL: test23b:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL_32-NEXT: vpsllq $63, %xmm1, %xmm1
; KNL_32-NEXT: vptestmq %zmm1, %zmm1, %k0
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1419,7 +1419,7 @@
define <2 x i64> @test25(i64* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i64> %src0) {
; KNL_64-LABEL: test25:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
+; KNL_64-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
; KNL_64-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_64-NEXT: vpsraq $32, %zmm0, %zmm0
; KNL_64-NEXT: vpsllq $63, %xmm1, %xmm1
@@ -1433,7 +1433,7 @@
;
; KNL_32-LABEL: test25:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
+; KNL_32-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
; KNL_32-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_32-NEXT: vpsraq $32, %zmm0, %zmm0
; KNL_32-NEXT: vpsllq $63, %xmm1, %xmm1
@@ -1475,7 +1475,7 @@
define <2 x i64> @test26(i64* %base, <2 x i32> %ind, <2 x i64> %src0) {
; KNL_64-LABEL: test26:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; KNL_64-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; KNL_64-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_64-NEXT: vpsraq $32, %zmm0, %zmm0
; KNL_64-NEXT: movb $3, %al
@@ -1487,7 +1487,7 @@
;
; KNL_32-LABEL: test26:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; KNL_32-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; KNL_32-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_32-NEXT: vpsraq $32, %zmm0, %zmm0
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1530,7 +1530,7 @@
; KNL_64-NEXT: movw $3, %ax
; KNL_64-NEXT: kmovw %eax, %k1
; KNL_64-NEXT: vgatherdps (%rdi,%zmm1,4), %zmm0 {%k1}
-; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL_64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
@@ -1541,7 +1541,7 @@
; KNL_32-NEXT: movw $3, %cx
; KNL_32-NEXT: kmovw %ecx, %k1
; KNL_32-NEXT: vgatherdps (%eax,%zmm1,4), %zmm0 {%k1}
-; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
@@ -1571,7 +1571,7 @@
define void @test28(<2 x i32>%a1, <2 x i32*> %ptr) {
; KNL_64-LABEL: test28:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; KNL_64-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_64-NEXT: movb $3, %al
; KNL_64-NEXT: kmovw %eax, %k1
@@ -1665,7 +1665,7 @@
define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x i32> %src0) {
; KNL_64-LABEL: test30:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %xmm3 killed %xmm3 def %zmm3
+; KNL_64-NEXT: # kill: def $xmm3 killed $xmm3 def $zmm3
; KNL_64-NEXT: vpslld $31, %xmm2, %xmm2
; KNL_64-NEXT: vptestmd %zmm2, %zmm2, %k1
; KNL_64-NEXT: kmovw %k1, %eax
@@ -1673,7 +1673,7 @@
; KNL_64-NEXT: vpsllq $2, %ymm1, %ymm1
; KNL_64-NEXT: vpaddq %ymm1, %ymm0, %ymm1
; KNL_64-NEXT: testb $1, %al
-; KNL_64-NEXT: # implicit-def: %xmm0
+; KNL_64-NEXT: # implicit-def: $xmm0
; KNL_64-NEXT: je .LBB31_2
; KNL_64-NEXT: # %bb.1: # %cond.load
; KNL_64-NEXT: vmovq %xmm1, %rax
@@ -1711,7 +1711,7 @@
; KNL_32-NEXT: vpslld $2, %xmm1, %xmm1
; KNL_32-NEXT: vpaddd %xmm1, %xmm0, %xmm2
; KNL_32-NEXT: testb $1, %al
-; KNL_32-NEXT: # implicit-def: %xmm1
+; KNL_32-NEXT: # implicit-def: $xmm1
; KNL_32-NEXT: je .LBB31_2
; KNL_32-NEXT: # %bb.1: # %cond.load
; KNL_32-NEXT: vmovd %xmm2, %eax
@@ -1735,7 +1735,7 @@
; KNL_32-NEXT: vpinsrd $2, (%eax), %xmm1, %xmm1
; KNL_32-NEXT: .LBB31_6: # %else5
; KNL_32-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
-; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL_32-NEXT: addl $12, %esp
; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
@@ -1749,7 +1749,7 @@
; SKX-NEXT: vpsllq $2, %ymm1, %ymm1
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm1
; SKX-NEXT: testb $1, %al
-; SKX-NEXT: # implicit-def: %xmm0
+; SKX-NEXT: # implicit-def: $xmm0
; SKX-NEXT: je .LBB31_2
; SKX-NEXT: # %bb.1: # %cond.load
; SKX-NEXT: vmovq %xmm1, %rax
@@ -1787,7 +1787,7 @@
; SKX_32-NEXT: vpslld $2, %xmm1, %xmm1
; SKX_32-NEXT: vpaddd %xmm1, %xmm0, %xmm2
; SKX_32-NEXT: testb $1, %al
-; SKX_32-NEXT: # implicit-def: %xmm1
+; SKX_32-NEXT: # implicit-def: $xmm1
; SKX_32-NEXT: je .LBB31_2
; SKX_32-NEXT: # %bb.1: # %cond.load
; SKX_32-NEXT: vmovd %xmm2, %eax
@@ -2329,7 +2329,7 @@
define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i64> %d) {
; KNL_64-LABEL: test_pr28312:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL_64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_64-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL_64-NEXT: kshiftlw $12, %k0, %k0
@@ -2348,7 +2348,7 @@
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-32, %esp
; KNL_32-NEXT: subl $32, %esp
-; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_32-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL_32-NEXT: kshiftlw $12, %k0, %k0
@@ -2517,7 +2517,7 @@
define <2 x float> @large_index(float* %base, <2 x i128> %ind, <2 x i1> %mask, <2 x float> %src0) {
; KNL_64-LABEL: large_index:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; KNL_64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; KNL_64-NEXT: vpsllq $63, %xmm0, %xmm0
; KNL_64-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL_64-NEXT: kshiftlw $14, %k0, %k0
@@ -2532,7 +2532,7 @@
;
; KNL_32-LABEL: large_index:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; KNL_32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; KNL_32-NEXT: vpsllq $63, %xmm0, %xmm0
; KNL_32-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL_32-NEXT: kshiftlw $14, %k0, %k0
@@ -2624,7 +2624,7 @@
; KNL_64-NEXT: movw $255, %ax
; KNL_64-NEXT: kmovw %eax, %k1
; KNL_64-NEXT: vgatherdps (%rdi,%zmm1,4), %zmm0 {%k1}
-; KNL_64-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL_64-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: sext_v8i8_index:
@@ -2636,7 +2636,7 @@
; KNL_32-NEXT: movw $255, %cx
; KNL_32-NEXT: kmovw %ecx, %k1
; KNL_32-NEXT: vgatherdps (%eax,%zmm1,4), %zmm0 {%k1}
-; KNL_32-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL_32-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL_32-NEXT: retl
;
; SKX-LABEL: sext_v8i8_index:
@@ -2670,7 +2670,7 @@
define void @test_scatter_2i32_index(<2 x double> %a1, double* %base, <2 x i32> %ind, <2 x i1> %mask) {
; KNL_64-LABEL: test_scatter_2i32_index:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL_64-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL_64-NEXT: vpsllq $32, %xmm1, %xmm1
; KNL_64-NEXT: vpsraq $32, %zmm1, %zmm1
; KNL_64-NEXT: vpsllq $63, %xmm2, %xmm2
@@ -2683,7 +2683,7 @@
;
; KNL_32-LABEL: test_scatter_2i32_index:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL_32-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL_32-NEXT: vpsllq $32, %xmm1, %xmm1
; KNL_32-NEXT: vpsraq $32, %zmm1, %zmm1
; KNL_32-NEXT: vpsllq $63, %xmm2, %xmm2
diff --git a/llvm/test/CodeGen/X86/masked_memop.ll b/llvm/test/CodeGen/X86/masked_memop.ll
index cd28147..4a25020 100644
--- a/llvm/test/CodeGen/X86/masked_memop.ll
+++ b/llvm/test/CodeGen/X86/masked_memop.ll
@@ -12,7 +12,7 @@
; AVX-LABEL: loadv1:
; AVX: ## %bb.0:
; AVX-NEXT: testq %rdi, %rdi
-; AVX-NEXT: ## implicit-def: %xmm1
+; AVX-NEXT: ## implicit-def: $xmm1
; AVX-NEXT: je LBB0_1
; AVX-NEXT: ## %bb.2: ## %else
; AVX-NEXT: testq %rdi, %rdi
@@ -32,7 +32,7 @@
; AVX512F-LABEL: loadv1:
; AVX512F: ## %bb.0:
; AVX512F-NEXT: testq %rdi, %rdi
-; AVX512F-NEXT: ## implicit-def: %xmm1
+; AVX512F-NEXT: ## implicit-def: $xmm1
; AVX512F-NEXT: jne LBB0_2
; AVX512F-NEXT: ## %bb.1: ## %cond.load
; AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
@@ -46,7 +46,7 @@
; SKX-LABEL: loadv1:
; SKX: ## %bb.0:
; SKX-NEXT: testq %rdi, %rdi
-; SKX-NEXT: ## implicit-def: %xmm1
+; SKX-NEXT: ## implicit-def: $xmm1
; SKX-NEXT: jne LBB0_2
; SKX-NEXT: ## %bb.1: ## %cond.load
; SKX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
@@ -99,13 +99,13 @@
;
; AVX512F-LABEL: test6:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vptestnmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $14, %k0, %k0
; AVX512F-NEXT: kshiftrw $14, %k0, %k1
; AVX512F-NEXT: vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -130,13 +130,13 @@
;
; AVX512F-LABEL: test7:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vptestnmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $12, %k0, %k0
; AVX512F-NEXT: kshiftrw $12, %k0, %k1
; AVX512F-NEXT: vblendmps (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -169,13 +169,13 @@
;
; AVX512F-LABEL: test8:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vptestnmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $12, %k0, %k0
; AVX512F-NEXT: kshiftrw $12, %k0, %k1
; AVX512F-NEXT: vpblendmd (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -206,8 +206,8 @@
;
; AVX512F-LABEL: test9:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vptestnmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $12, %k0, %k0
; AVX512F-NEXT: kshiftrw $12, %k0, %k1
@@ -249,13 +249,13 @@
;
; AVX512F-LABEL: test10:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vptestnmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $12, %k0, %k0
; AVX512F-NEXT: kshiftrw $12, %k0, %k1
; AVX512F-NEXT: vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: test10:
@@ -290,12 +290,12 @@
;
; AVX512F-LABEL: test10b:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vptestnmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $12, %k0, %k0
; AVX512F-NEXT: kshiftrw $12, %k0, %k1
; AVX512F-NEXT: vmovupd (%rdi), %zmm0 {%k1} {z}
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: test10b:
@@ -330,13 +330,13 @@
;
; AVX512F-LABEL: test11a:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vptestnmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $8, %k0, %k0
; AVX512F-NEXT: kshiftrw $8, %k0, %k1
; AVX512F-NEXT: vblendmps (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11a:
@@ -374,12 +374,12 @@
;
; AVX512F-LABEL: test11b:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512F-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512F-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpblendmd (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11b:
@@ -419,7 +419,7 @@
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vmovups (%rdi), %zmm0 {%k1} {z}
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11c:
@@ -459,7 +459,7 @@
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} {z}
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11d:
@@ -494,8 +494,8 @@
;
; AVX512F-LABEL: test12:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vptestnmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $8, %k0, %k0
; AVX512F-NEXT: kshiftrw $8, %k0, %k1
@@ -535,7 +535,7 @@
;
; AVX512F-LABEL: test14:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512F-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512F-NEXT: vptestnmq %zmm0, %zmm0, %k0
@@ -625,14 +625,14 @@
;
; AVX512F-LABEL: test16:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512F-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512F-NEXT: vptestnmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $14, %k0, %k0
; AVX512F-NEXT: kshiftrw $14, %k0, %k1
; AVX512F-NEXT: vblendmps (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -727,7 +727,7 @@
; AVX512F-NEXT: kshiftlw $14, %k0, %k0
; AVX512F-NEXT: kshiftrw $14, %k0, %k1
; AVX512F-NEXT: vmovups (%rdi), %zmm0 {%k1} {z}
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -754,7 +754,7 @@
; AVX512F-NEXT: movw $15, %ax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovups (%rdi), %zmm0 {%k1} {z}
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -780,11 +780,11 @@
;
; AVX512F-LABEL: mload_constmask_v4f32:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: movw $13, %ax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovups (%rdi), %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -831,11 +831,11 @@
;
; AVX512F-LABEL: mload_constmask_v4i32:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: movw $14, %ax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -875,11 +875,11 @@
;
; AVX512F-LABEL: mload_constmask_v8f32:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: movw $7, %ax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovups (%rdi), %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v8f32:
@@ -902,11 +902,11 @@
;
; AVX512F-LABEL: mload_constmask_v4f64:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: movb $7, %al
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovupd (%rdi), %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v4f64:
@@ -929,11 +929,11 @@
;
; AVX512F-LABEL: mload_constmask_v8i32:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: movw $135, %ax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v8i32:
@@ -954,11 +954,11 @@
;
; AVX512F-LABEL: mload_constmask_v4i64:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: movb $9, %al
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v4i64:
@@ -1011,7 +1011,7 @@
; AVX512F-NEXT: movb $7, %al
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovupd (%rdi), %zmm0 {%k1} {z}
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v4f64_undef_passthrough:
@@ -1042,7 +1042,7 @@
; AVX512F-NEXT: movb $6, %al
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} {z}
-; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v4i64_undef_passthrough:
@@ -1070,7 +1070,7 @@
;
; AVX512F-LABEL: test21:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512F-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: movw $15, %ax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovdqu32 %zmm1, (%rdi) {%k1}
@@ -1290,8 +1290,8 @@
;
; AVX512F-LABEL: trunc_mask:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: def %xmm2 killed %xmm2 def %zmm2
-; AVX512F-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: ## kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpcmpgtd %zmm2, %zmm1, %k0
; AVX512F-NEXT: kshiftlw $12, %k0, %k0
diff --git a/llvm/test/CodeGen/X86/misched-copy.ll b/llvm/test/CodeGen/X86/misched-copy.ll
index 9456bf6..3a1e0fa 100644
--- a/llvm/test/CodeGen/X86/misched-copy.ll
+++ b/llvm/test/CodeGen/X86/misched-copy.ll
@@ -9,10 +9,10 @@
; MUL_HiLo PhysReg def copies should be just below the mul.
;
; CHECK: *** Final schedule for %bb.1 ***
-; CHECK: %eax = COPY
-; CHECK-NEXT: MUL32r %{{[0-9]+}}:gr32, implicit-def %eax, implicit-def %edx, implicit-def dead %eflags, implicit %eax
-; CHECK-NEXT: COPY %e{{[ad]}}x
-; CHECK-NEXT: COPY %e{{[ad]}}x
+; CHECK: $eax = COPY
+; CHECK-NEXT: MUL32r %{{[0-9]+}}:gr32, implicit-def $eax, implicit-def $edx, implicit-def dead $eflags, implicit $eax
+; CHECK-NEXT: COPY $e{{[ad]}}x
+; CHECK-NEXT: COPY $e{{[ad]}}x
; CHECK: DIVSSrm
define i64 @mulhoist(i32 %a, i32 %b) #0 {
entry:
diff --git a/llvm/test/CodeGen/X86/movmsk.ll b/llvm/test/CodeGen/X86/movmsk.ll
index d2ee19d..4a724f1 100644
--- a/llvm/test/CodeGen/X86/movmsk.ll
+++ b/llvm/test/CodeGen/X86/movmsk.ll
@@ -102,7 +102,7 @@
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movq %xmm0, %rdi
; CHECK-NEXT: shrq $63, %rdi
-; CHECK-NEXT: ## kill: def %edi killed %edi killed %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi killed $rdi
; CHECK-NEXT: jmp _float_call_signbit_callee ## TAILCALL
entry:
%t0 = bitcast double %n to i64
diff --git a/llvm/test/CodeGen/X86/movtopush.mir b/llvm/test/CodeGen/X86/movtopush.mir
index 4c1dfc5..783eb2e 100644
--- a/llvm/test/CodeGen/X86/movtopush.mir
+++ b/llvm/test/CodeGen/X86/movtopush.mir
@@ -33,25 +33,25 @@
...
---
# CHECK-LABEL: test9
-# CHECK: ADJCALLSTACKDOWN32 16, 0, 16, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
-# CHECK-NEXT: PUSH32i8 4, implicit-def %esp, implicit %esp
-# CHECK-NEXT: PUSH32i8 3, implicit-def %esp, implicit %esp
-# CHECK-NEXT: PUSH32i8 2, implicit-def %esp, implicit %esp
-# CHECK-NEXT: PUSH32i8 1, implicit-def %esp, implicit %esp
-# CHECK-NEXT: CALLpcrel32 @good, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp
-# CHECK-NEXT: ADJCALLSTACKUP32 16, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
-# CHECK-NEXT: ADJCALLSTACKDOWN32 20, 0, 20, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
-# CHECK-NEXT: %1:gr32 = MOV32rm %stack.2.s, 1, %noreg, 0, %noreg :: (load 4 from %stack.2.s, align 8)
-# CHECK-NEXT: %2:gr32 = MOV32rm %stack.2.s, 1, %noreg, 4, %noreg :: (load 4 from %stack.2.s + 4)
-# CHECK-NEXT: %4:gr32 = LEA32r %stack.0.p, 1, %noreg, 0, %noreg
-# CHECK-NEXT: %5:gr32 = LEA32r %stack.1.q, 1, %noreg, 0, %noreg
-# CHECK-NEXT: PUSH32r %4, implicit-def %esp, implicit %esp
-# CHECK-NEXT: PUSH32r %5, implicit-def %esp, implicit %esp
-# CHECK-NEXT: PUSH32i8 6, implicit-def %esp, implicit %esp
-# CHECK-NEXT: PUSH32r %2, implicit-def %esp, implicit %esp
-# CHECK-NEXT: PUSH32r %1, implicit-def %esp, implicit %esp
-# CHECK-NEXT: CALLpcrel32 @struct, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp
-# CHECK-NEXT: ADJCALLSTACKUP32 20, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
+# CHECK: ADJCALLSTACKDOWN32 16, 0, 16, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp
+# CHECK-NEXT: PUSH32i8 4, implicit-def $esp, implicit $esp
+# CHECK-NEXT: PUSH32i8 3, implicit-def $esp, implicit $esp
+# CHECK-NEXT: PUSH32i8 2, implicit-def $esp, implicit $esp
+# CHECK-NEXT: PUSH32i8 1, implicit-def $esp, implicit $esp
+# CHECK-NEXT: CALLpcrel32 @good, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp
+# CHECK-NEXT: ADJCALLSTACKUP32 16, 0, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp
+# CHECK-NEXT: ADJCALLSTACKDOWN32 20, 0, 20, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp
+# CHECK-NEXT: %1:gr32 = MOV32rm %stack.2.s, 1, $noreg, 0, $noreg :: (load 4 from %stack.2.s, align 8)
+# CHECK-NEXT: %2:gr32 = MOV32rm %stack.2.s, 1, $noreg, 4, $noreg :: (load 4 from %stack.2.s + 4)
+# CHECK-NEXT: %4:gr32 = LEA32r %stack.0.p, 1, $noreg, 0, $noreg
+# CHECK-NEXT: %5:gr32 = LEA32r %stack.1.q, 1, $noreg, 0, $noreg
+# CHECK-NEXT: PUSH32r %4, implicit-def $esp, implicit $esp
+# CHECK-NEXT: PUSH32r %5, implicit-def $esp, implicit $esp
+# CHECK-NEXT: PUSH32i8 6, implicit-def $esp, implicit $esp
+# CHECK-NEXT: PUSH32r %2, implicit-def $esp, implicit $esp
+# CHECK-NEXT: PUSH32r %1, implicit-def $esp, implicit $esp
+# CHECK-NEXT: CALLpcrel32 @struct, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp
+# CHECK-NEXT: ADJCALLSTACKUP32 20, 0, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp
# CHECK-NEXT: RET 0
name: test9
alignment: 0
@@ -99,27 +99,27 @@
constants:
body: |
bb.0.entry:
- ADJCALLSTACKDOWN32 16, 0, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
- %0 = COPY %esp
- MOV32mi %0, 1, %noreg, 12, %noreg, 4 :: (store 4 into stack + 12)
- MOV32mi %0, 1, %noreg, 8, %noreg, 3 :: (store 4 into stack + 8)
- MOV32mi %0, 1, %noreg, 4, %noreg, 2 :: (store 4 into stack + 4)
- MOV32mi %0, 1, %noreg, 0, %noreg, 1 :: (store 4 into stack)
- CALLpcrel32 @good, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp
- ADJCALLSTACKUP32 16, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
- ADJCALLSTACKDOWN32 20, 0, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
- %1 = MOV32rm %stack.2.s, 1, %noreg, 0, %noreg :: (load 4 from %stack.2.s, align 8)
- %2 = MOV32rm %stack.2.s, 1, %noreg, 4, %noreg :: (load 4 from %stack.2.s + 4)
- %3 = COPY %esp
- MOV32mr %3, 1, %noreg, 4, %noreg, killed %2 :: (store 4)
- MOV32mr %3, 1, %noreg, 0, %noreg, killed %1 :: (store 4)
- %4 = LEA32r %stack.0.p, 1, %noreg, 0, %noreg
- MOV32mr %3, 1, %noreg, 16, %noreg, killed %4 :: (store 4 into stack + 16)
- %5 = LEA32r %stack.1.q, 1, %noreg, 0, %noreg
- MOV32mr %3, 1, %noreg, 12, %noreg, killed %5 :: (store 4 into stack + 12)
- MOV32mi %3, 1, %noreg, 8, %noreg, 6 :: (store 4 into stack + 8)
- CALLpcrel32 @struct, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp,
- ADJCALLSTACKUP32 20, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
+ ADJCALLSTACKDOWN32 16, 0, 0, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp
+ %0 = COPY $esp
+ MOV32mi %0, 1, $noreg, 12, $noreg, 4 :: (store 4 into stack + 12)
+ MOV32mi %0, 1, $noreg, 8, $noreg, 3 :: (store 4 into stack + 8)
+ MOV32mi %0, 1, $noreg, 4, $noreg, 2 :: (store 4 into stack + 4)
+ MOV32mi %0, 1, $noreg, 0, $noreg, 1 :: (store 4 into stack)
+ CALLpcrel32 @good, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp
+ ADJCALLSTACKUP32 16, 0, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp
+ ADJCALLSTACKDOWN32 20, 0, 0, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp
+ %1 = MOV32rm %stack.2.s, 1, $noreg, 0, $noreg :: (load 4 from %stack.2.s, align 8)
+ %2 = MOV32rm %stack.2.s, 1, $noreg, 4, $noreg :: (load 4 from %stack.2.s + 4)
+ %3 = COPY $esp
+ MOV32mr %3, 1, $noreg, 4, $noreg, killed %2 :: (store 4)
+ MOV32mr %3, 1, $noreg, 0, $noreg, killed %1 :: (store 4)
+ %4 = LEA32r %stack.0.p, 1, $noreg, 0, $noreg
+ MOV32mr %3, 1, $noreg, 16, $noreg, killed %4 :: (store 4 into stack + 16)
+ %5 = LEA32r %stack.1.q, 1, $noreg, 0, $noreg
+ MOV32mr %3, 1, $noreg, 12, $noreg, killed %5 :: (store 4 into stack + 12)
+ MOV32mi %3, 1, $noreg, 8, $noreg, 6 :: (store 4 into stack + 8)
+ CALLpcrel32 @struct, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp,
+ ADJCALLSTACKUP32 20, 0, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp
RET 0
...
diff --git a/llvm/test/CodeGen/X86/mul-constant-i16.ll b/llvm/test/CodeGen/X86/mul-constant-i16.ll
index 2036eae..10fc16b 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i16.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i16.ll
@@ -21,14 +21,14 @@
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_2:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 2
ret i16 %mul
@@ -39,14 +39,14 @@
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_3:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 3
ret i16 %mul
@@ -57,14 +57,14 @@
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $2, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_4:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (,%rdi,4), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 4
ret i16 %mul
@@ -75,14 +75,14 @@
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_5:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 5
ret i16 %mul
@@ -94,15 +94,15 @@
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_6:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: addl %edi, %edi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 6
ret i16 %mul
@@ -114,15 +114,15 @@
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (,%ecx,8), %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_7:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (,%rdi,8), %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 7
ret i16 %mul
@@ -133,14 +133,14 @@
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $3, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_8:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (,%rdi,8), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 8
ret i16 %mul
@@ -151,14 +151,14 @@
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,8), %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_9:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 9
ret i16 %mul
@@ -170,15 +170,15 @@
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_10:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: addl %edi, %edi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 10
ret i16 %mul
@@ -190,15 +190,15 @@
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: leal (%eax,%ecx,2), %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_11:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rdi,%rax,2), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 11
ret i16 %mul
@@ -210,15 +210,15 @@
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_12:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: shll $2, %edi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 12
ret i16 %mul
@@ -230,15 +230,15 @@
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,2), %ecx
; X86-NEXT: leal (%eax,%ecx,4), %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_13:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 13
ret i16 %mul
@@ -251,16 +251,16 @@
; X86-NEXT: leal (%ecx,%ecx,2), %eax
; X86-NEXT: leal (%ecx,%eax,4), %eax
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_14:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
; X64-NEXT: addl %edi, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 14
ret i16 %mul
@@ -272,15 +272,15 @@
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_15:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 15
ret i16 %mul
@@ -291,7 +291,7 @@
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $4, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_16:
@@ -310,16 +310,16 @@
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shll $4, %eax
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_17:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $4, %eax
; X64-NEXT: leal (%rax,%rdi), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 17
ret i16 %mul
@@ -331,15 +331,15 @@
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,8), %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_18:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: addl %edi, %edi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 18
ret i16 %mul
@@ -352,16 +352,16 @@
; X86-NEXT: leal (%ecx,%ecx,4), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_19:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: shll $2, %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 19
ret i16 %mul
@@ -373,15 +373,15 @@
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_20:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: shll $2, %edi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 20
ret i16 %mul
@@ -393,15 +393,15 @@
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: leal (%eax,%ecx,4), %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_21:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 21
ret i16 %mul
@@ -414,16 +414,16 @@
; X86-NEXT: leal (%ecx,%ecx,4), %eax
; X86-NEXT: leal (%ecx,%eax,4), %eax
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_22:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
; X64-NEXT: addl %edi, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 22
ret i16 %mul
@@ -436,16 +436,16 @@
; X86-NEXT: leal (%ecx,%ecx,2), %eax
; X86-NEXT: shll $3, %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_23:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: shll $3, %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 23
ret i16 %mul
@@ -457,15 +457,15 @@
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $3, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_24:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: shll $3, %edi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 24
ret i16 %mul
@@ -477,15 +477,15 @@
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_25:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rax,%rax,4), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 25
ret i16 %mul
@@ -498,16 +498,16 @@
; X86-NEXT: leal (%ecx,%ecx,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_26:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 26
ret i16 %mul
@@ -519,15 +519,15 @@
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_27:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 27
ret i16 %mul
@@ -540,16 +540,16 @@
; X86-NEXT: leal (%ecx,%ecx,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_28:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
; X64-NEXT: addl %edi, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 28
ret i16 %mul
@@ -563,17 +563,17 @@
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_29:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
; X64-NEXT: addl %edi, %eax
; X64-NEXT: addl %edi, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 29
ret i16 %mul
@@ -587,7 +587,7 @@
; X86-NEXT: shll $5, %eax
; X86-NEXT: subl %ecx, %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_30:
@@ -596,7 +596,7 @@
; X64-NEXT: shll $5, %eax
; X64-NEXT: subl %edi, %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 30
ret i16 %mul
@@ -609,7 +609,7 @@
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shll $5, %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_31:
@@ -617,7 +617,7 @@
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $5, %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 31
ret i16 %mul
@@ -628,7 +628,7 @@
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $5, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_32:
@@ -648,16 +648,16 @@
; X86-NEXT: leal 42(%eax,%eax,8), %ecx
; X86-NEXT: leal 2(%eax,%eax,4), %eax
; X86-NEXT: imull %ecx, %eax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_spec:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal 42(%rdi,%rdi,8), %ecx
; X64-NEXT: leal 2(%rdi,%rdi,4), %eax
; X64-NEXT: imull %ecx, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 9
%add = add nsw i16 %mul, 42
diff --git a/llvm/test/CodeGen/X86/mul-constant-i32.ll b/llvm/test/CodeGen/X86/mul-constant-i32.ll
index 83024f5..b3d2c07 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i32.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i32.ll
@@ -61,13 +61,13 @@
;
; X64-HSW-LABEL: test_mul_by_2:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_2:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
@@ -79,25 +79,25 @@
;
; HSW-NOOPT-LABEL: test_mul_by_2:
; HSW-NOOPT: # %bb.0:
-; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; HSW-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; HSW-NOOPT-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [7:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_2:
; JAG-NOOPT: # %bb.0:
-; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; JAG-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; JAG-NOOPT-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_2:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: leal (%rdi,%rdi), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_2:
; SLM-NOOPT: # %bb.0:
-; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NOOPT-NEXT: leal (%rdi,%rdi), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 2
@@ -112,13 +112,13 @@
;
; X64-HSW-LABEL: test_mul_by_3:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_3:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
@@ -129,25 +129,25 @@
;
; HSW-NOOPT-LABEL: test_mul_by_3:
; HSW-NOOPT: # %bb.0:
-; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; HSW-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [7:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_3:
; JAG-NOOPT: # %bb.0:
-; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; JAG-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; JAG-NOOPT-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_3:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_3:
; SLM-NOOPT: # %bb.0:
-; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NOOPT-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 3
@@ -163,13 +163,13 @@
;
; X64-HSW-LABEL: test_mul_by_4:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_4:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
@@ -181,25 +181,25 @@
;
; HSW-NOOPT-LABEL: test_mul_by_4:
; HSW-NOOPT: # %bb.0:
-; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; HSW-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; HSW-NOOPT-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [7:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_4:
; JAG-NOOPT: # %bb.0:
-; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; JAG-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; JAG-NOOPT-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_4:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: leal (,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_4:
; SLM-NOOPT: # %bb.0:
-; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NOOPT-NEXT: leal (,%rdi,4), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 4
@@ -214,13 +214,13 @@
;
; X64-HSW-LABEL: test_mul_by_5:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_5:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
@@ -231,25 +231,25 @@
;
; HSW-NOOPT-LABEL: test_mul_by_5:
; HSW-NOOPT: # %bb.0:
-; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; HSW-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [7:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_5:
; JAG-NOOPT: # %bb.0:
-; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; JAG-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; JAG-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_5:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_5:
; SLM-NOOPT: # %bb.0:
-; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 5
@@ -266,14 +266,14 @@
;
; X64-HSW-LABEL: test_mul_by_6:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: addl %edi, %edi # sched: [1:0.25]
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_6:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -295,7 +295,7 @@
;
; X64-SLM-LABEL: test_mul_by_6:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -318,14 +318,14 @@
;
; X64-HSW-LABEL: test_mul_by_7:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_7:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -347,7 +347,7 @@
;
; X64-SLM-LABEL: test_mul_by_7:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: leal (,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: subl %edi, %eax # sched: [1:0.50]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -369,13 +369,13 @@
;
; X64-HSW-LABEL: test_mul_by_8:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_8:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
@@ -387,25 +387,25 @@
;
; HSW-NOOPT-LABEL: test_mul_by_8:
; HSW-NOOPT: # %bb.0:
-; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; HSW-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; HSW-NOOPT-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [7:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_8:
; JAG-NOOPT: # %bb.0:
-; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; JAG-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; JAG-NOOPT-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_8:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: leal (,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_8:
; SLM-NOOPT: # %bb.0:
-; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NOOPT-NEXT: leal (,%rdi,8), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 8
@@ -420,13 +420,13 @@
;
; X64-HSW-LABEL: test_mul_by_9:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_9:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
@@ -437,25 +437,25 @@
;
; HSW-NOOPT-LABEL: test_mul_by_9:
; HSW-NOOPT: # %bb.0:
-; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; HSW-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [7:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_9:
; JAG-NOOPT: # %bb.0:
-; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; JAG-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; JAG-NOOPT-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_9:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_9:
; SLM-NOOPT: # %bb.0:
-; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NOOPT-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 9
@@ -472,14 +472,14 @@
;
; X64-HSW-LABEL: test_mul_by_10:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: addl %edi, %edi # sched: [1:0.25]
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_10:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -501,7 +501,7 @@
;
; X64-SLM-LABEL: test_mul_by_10:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -524,14 +524,14 @@
;
; X64-HSW-LABEL: test_mul_by_11:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_11:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -574,14 +574,14 @@
;
; X64-HSW-LABEL: test_mul_by_12:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: shll $2, %edi # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_12:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: shll $2, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -603,7 +603,7 @@
;
; X64-SLM-LABEL: test_mul_by_12:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: shll $2, %edi # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -626,14 +626,14 @@
;
; X64-HSW-LABEL: test_mul_by_13:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_13:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -677,7 +677,7 @@
;
; X64-HSW-LABEL: test_mul_by_14:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25]
@@ -685,7 +685,7 @@
;
; X64-JAG-LABEL: test_mul_by_14:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50]
@@ -729,14 +729,14 @@
;
; X64-HSW-LABEL: test_mul_by_15:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_15:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -758,7 +758,7 @@
;
; X64-SLM-LABEL: test_mul_by_15:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rax,%rax,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -834,7 +834,7 @@
;
; X64-HSW-LABEL: test_mul_by_17:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: movl %edi, %eax # sched: [1:0.25]
; X64-HSW-NEXT: shll $4, %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rdi), %eax # sched: [1:0.50]
@@ -842,7 +842,7 @@
;
; X64-JAG-LABEL: test_mul_by_17:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: movl %edi, %eax # sched: [1:0.50]
; X64-JAG-NEXT: shll $4, %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rdi), %eax # sched: [1:0.50]
@@ -865,7 +865,7 @@
;
; X64-SLM-LABEL: test_mul_by_17:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: movl %edi, %eax # sched: [1:0.50]
; X64-SLM-NEXT: shll $4, %eax # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rax,%rdi), %eax # sched: [1:1.00]
@@ -889,14 +889,14 @@
;
; X64-HSW-LABEL: test_mul_by_18:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: addl %edi, %edi # sched: [1:0.25]
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_18:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -918,7 +918,7 @@
;
; X64-SLM-LABEL: test_mul_by_18:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-SLM-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -942,7 +942,7 @@
;
; X64-HSW-LABEL: test_mul_by_19:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: shll $2, %eax # sched: [1:0.50]
; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25]
@@ -950,7 +950,7 @@
;
; X64-JAG-LABEL: test_mul_by_19:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: shll $2, %eax # sched: [1:0.50]
; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50]
@@ -994,14 +994,14 @@
;
; X64-HSW-LABEL: test_mul_by_20:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: shll $2, %edi # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_20:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: shll $2, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -1023,7 +1023,7 @@
;
; X64-SLM-LABEL: test_mul_by_20:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: shll $2, %edi # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -1046,14 +1046,14 @@
;
; X64-HSW-LABEL: test_mul_by_21:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_21:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -1097,7 +1097,7 @@
;
; X64-HSW-LABEL: test_mul_by_22:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25]
@@ -1105,7 +1105,7 @@
;
; X64-JAG-LABEL: test_mul_by_22:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50]
@@ -1150,7 +1150,7 @@
;
; X64-HSW-LABEL: test_mul_by_23:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: shll $3, %eax # sched: [1:0.50]
; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25]
@@ -1158,7 +1158,7 @@
;
; X64-JAG-LABEL: test_mul_by_23:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: shll $3, %eax # sched: [1:0.50]
; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50]
@@ -1202,14 +1202,14 @@
;
; X64-HSW-LABEL: test_mul_by_24:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: shll $3, %edi # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_24:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: shll $3, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -1231,7 +1231,7 @@
;
; X64-SLM-LABEL: test_mul_by_24:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: shll $3, %edi # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -1254,14 +1254,14 @@
;
; X64-HSW-LABEL: test_mul_by_25:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_25:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -1283,7 +1283,7 @@
;
; X64-SLM-LABEL: test_mul_by_25:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rax,%rax,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -1307,7 +1307,7 @@
;
; X64-HSW-LABEL: test_mul_by_26:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25]
@@ -1315,7 +1315,7 @@
;
; X64-JAG-LABEL: test_mul_by_26:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50]
@@ -1359,14 +1359,14 @@
;
; X64-HSW-LABEL: test_mul_by_27:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [7:1.00]
;
; X64-JAG-LABEL: test_mul_by_27:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -1388,7 +1388,7 @@
;
; X64-SLM-LABEL: test_mul_by_27:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rax,%rax,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -1412,7 +1412,7 @@
;
; X64-HSW-LABEL: test_mul_by_28:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25]
@@ -1420,7 +1420,7 @@
;
; X64-JAG-LABEL: test_mul_by_28:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50]
@@ -1466,7 +1466,7 @@
;
; X64-HSW-LABEL: test_mul_by_29:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25]
@@ -1475,7 +1475,7 @@
;
; X64-JAG-LABEL: test_mul_by_29:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50]
@@ -1681,7 +1681,7 @@
;
; X64-HSW-LABEL: test_mul_spec:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %ecx # sched: [1:0.50]
; X64-HSW-NEXT: addl $42, %ecx # sched: [1:0.25]
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
@@ -1691,7 +1691,7 @@
;
; X64-JAG-LABEL: test_mul_spec:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:0.50]
; X64-JAG-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: imull %ecx, %eax # sched: [3:1.00]
@@ -1707,7 +1707,7 @@
;
; HSW-NOOPT-LABEL: test_mul_spec:
; HSW-NOOPT: # %bb.0:
-; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; HSW-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,8), %ecx # sched: [1:0.50]
; HSW-NOOPT-NEXT: addl $42, %ecx # sched: [1:0.25]
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
@@ -1717,7 +1717,7 @@
;
; JAG-NOOPT-LABEL: test_mul_spec:
; JAG-NOOPT: # %bb.0:
-; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; JAG-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; JAG-NOOPT-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:0.50]
; JAG-NOOPT-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: imull %ecx, %eax # sched: [3:1.00]
@@ -1725,7 +1725,7 @@
;
; X64-SLM-LABEL: test_mul_spec:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SLM-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:1.00]
; X64-SLM-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: imull %ecx, %eax # sched: [3:1.00]
@@ -1733,7 +1733,7 @@
;
; SLM-NOOPT-LABEL: test_mul_spec:
; SLM-NOOPT: # %bb.0:
-; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
+; SLM-NOOPT-NEXT: # kill: def $edi killed $edi def $rdi
; SLM-NOOPT-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:1.00]
; SLM-NOOPT-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: imull %ecx, %eax # sched: [3:1.00]
diff --git a/llvm/test/CodeGen/X86/mul-constant-result.ll b/llvm/test/CodeGen/X86/mul-constant-result.ll
index bec0ed9..4d1b23a 100644
--- a/llvm/test/CodeGen/X86/mul-constant-result.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-result.ll
@@ -188,7 +188,7 @@
;
; X64-HSW-LABEL: mult:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: cmpl $1, %esi
; X64-HSW-NEXT: movl $1, %ecx
; X64-HSW-NEXT: movl %esi, %eax
@@ -202,60 +202,60 @@
; X64-HSW-NEXT: jmpq *.LJTI0_0(,%rdi,8)
; X64-HSW-NEXT: .LBB0_2:
; X64-HSW-NEXT: addl %eax, %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_36:
; X64-HSW-NEXT: xorl %eax, %eax
; X64-HSW-NEXT: .LBB0_37:
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_3:
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_4:
; X64-HSW-NEXT: shll $2, %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_5:
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_6:
; X64-HSW-NEXT: addl %eax, %eax
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_7:
; X64-HSW-NEXT: leal (,%rax,8), %ecx
; X64-HSW-NEXT: jmp .LBB0_8
; X64-HSW-NEXT: .LBB0_9:
; X64-HSW-NEXT: shll $3, %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_10:
; X64-HSW-NEXT: leal (%rax,%rax,8), %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_11:
; X64-HSW-NEXT: addl %eax, %eax
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_12:
; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx
; X64-HSW-NEXT: leal (%rax,%rcx,2), %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_13:
; X64-HSW-NEXT: shll $2, %eax
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_14:
; X64-HSW-NEXT: leal (%rax,%rax,2), %ecx
; X64-HSW-NEXT: leal (%rax,%rcx,4), %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_15:
; X64-HSW-NEXT: leal (%rax,%rax,2), %ecx
@@ -263,11 +263,11 @@
; X64-HSW-NEXT: .LBB0_18:
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_19:
; X64-HSW-NEXT: shll $4, %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_20:
; X64-HSW-NEXT: movl %eax, %ecx
@@ -276,7 +276,7 @@
; X64-HSW-NEXT: .LBB0_21:
; X64-HSW-NEXT: addl %eax, %eax
; X64-HSW-NEXT: leal (%rax,%rax,8), %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_22:
; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx
@@ -285,12 +285,12 @@
; X64-HSW-NEXT: .LBB0_23:
; X64-HSW-NEXT: shll $2, %eax
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_24:
; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx
; X64-HSW-NEXT: leal (%rax,%rcx,4), %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_25:
; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx
@@ -304,12 +304,12 @@
; X64-HSW-NEXT: .LBB0_27:
; X64-HSW-NEXT: shll $3, %eax
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_28:
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_29:
; X64-HSW-NEXT: leal (%rax,%rax,8), %ecx
@@ -318,7 +318,7 @@
; X64-HSW-NEXT: .LBB0_30:
; X64-HSW-NEXT: leal (%rax,%rax,8), %eax
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_31:
; X64-HSW-NEXT: leal (%rax,%rax,8), %ecx
@@ -331,7 +331,7 @@
; X64-HSW-NEXT: .LBB0_17:
; X64-HSW-NEXT: addl %eax, %ecx
; X64-HSW-NEXT: movl %ecx, %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_33:
; X64-HSW-NEXT: movl %eax, %ecx
@@ -344,11 +344,11 @@
; X64-HSW-NEXT: .LBB0_8:
; X64-HSW-NEXT: subl %eax, %ecx
; X64-HSW-NEXT: movl %ecx, %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_35:
; X64-HSW-NEXT: shll $5, %eax
-; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
; X64-HSW-NEXT: retq
%3 = icmp eq i32 %1, 0
%4 = icmp sgt i32 %1, 1
diff --git a/llvm/test/CodeGen/X86/negate-i1.ll b/llvm/test/CodeGen/X86/negate-i1.ll
index c9ca52b..743f1a1 100644
--- a/llvm/test/CodeGen/X86/negate-i1.ll
+++ b/llvm/test/CodeGen/X86/negate-i1.ll
@@ -49,7 +49,7 @@
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $1, %eax
; X32-NEXT: negl %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
%b = sext i1 %a to i16
ret i16 %b
@@ -66,7 +66,7 @@
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: negl %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
%b = sext i1 %a to i16
ret i16 %b
@@ -109,7 +109,7 @@
define i64 @select_i64_neg1_or_0(i1 %a) {
; X64-LABEL: select_i64_neg1_or_0:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: andl $1, %edi
; X64-NEXT: negq %rdi
; X64-NEXT: movq %rdi, %rax
diff --git a/llvm/test/CodeGen/X86/non-value-mem-operand.mir b/llvm/test/CodeGen/X86/non-value-mem-operand.mir
index b537a63..a290b04 100644
--- a/llvm/test/CodeGen/X86/non-value-mem-operand.mir
+++ b/llvm/test/CodeGen/X86/non-value-mem-operand.mir
@@ -123,12 +123,12 @@
alignment: 4
tracksRegLiveness: true
fixedStack:
- - { id: 0, type: spill-slot, offset: -56, size: 8, alignment: 8, callee-saved-register: '%rbx' }
- - { id: 1, type: spill-slot, offset: -48, size: 8, alignment: 16, callee-saved-register: '%r12' }
- - { id: 2, type: spill-slot, offset: -40, size: 8, alignment: 8, callee-saved-register: '%r13' }
- - { id: 3, type: spill-slot, offset: -32, size: 8, alignment: 16, callee-saved-register: '%r14' }
- - { id: 4, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '%r15' }
- - { id: 5, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '%rbp' }
+ - { id: 0, type: spill-slot, offset: -56, size: 8, alignment: 8, callee-saved-register: '$rbx' }
+ - { id: 1, type: spill-slot, offset: -48, size: 8, alignment: 16, callee-saved-register: '$r12' }
+ - { id: 2, type: spill-slot, offset: -40, size: 8, alignment: 8, callee-saved-register: '$r13' }
+ - { id: 3, type: spill-slot, offset: -32, size: 8, alignment: 16, callee-saved-register: '$r14' }
+ - { id: 4, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '$r15' }
+ - { id: 5, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '$rbp' }
stack:
- { id: 0, offset: -88, size: 8, alignment: 8 }
- { id: 1, offset: -96, size: 8, alignment: 8 }
@@ -143,151 +143,151 @@
body: |
bb.0.bb:
successors: %bb.1.bb2(0x00000800), %bb.3.bb3(0x7ffff800)
- liveins: %rbp, %r15, %r14, %r13, %r12, %rbx
+ liveins: $rbp, $r15, $r14, $r13, $r12, $rbx
- frame-setup PUSH64r killed %rbp, implicit-def %rsp, implicit %rsp
- frame-setup PUSH64r killed %r15, implicit-def %rsp, implicit %rsp
- frame-setup PUSH64r killed %r14, implicit-def %rsp, implicit %rsp
- frame-setup PUSH64r killed %r13, implicit-def %rsp, implicit %rsp
- frame-setup PUSH64r killed %r12, implicit-def %rsp, implicit %rsp
- frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
- %rsp = frame-setup SUB64ri8 %rsp, 56, implicit-def dead %eflags
- CALL64r undef %rax, csr_64, implicit %rsp, implicit undef %rdi, implicit undef %rsi, implicit-def %rsp, implicit-def %rax
- TEST64rr %rax, %rax, implicit-def %eflags
- JNE_1 %bb.3.bb3, implicit killed %eflags
+ frame-setup PUSH64r killed $rbp, implicit-def $rsp, implicit $rsp
+ frame-setup PUSH64r killed $r15, implicit-def $rsp, implicit $rsp
+ frame-setup PUSH64r killed $r14, implicit-def $rsp, implicit $rsp
+ frame-setup PUSH64r killed $r13, implicit-def $rsp, implicit $rsp
+ frame-setup PUSH64r killed $r12, implicit-def $rsp, implicit $rsp
+ frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
+ $rsp = frame-setup SUB64ri8 $rsp, 56, implicit-def dead $eflags
+ CALL64r undef $rax, csr_64, implicit $rsp, implicit undef $rdi, implicit undef $rsi, implicit-def $rsp, implicit-def $rax
+ TEST64rr $rax, $rax, implicit-def $eflags
+ JNE_1 %bb.3.bb3, implicit killed $eflags
bb.1.bb2:
successors: %bb.2(0x40000000), %bb.13.bb59(0x40000000)
- %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags
- TEST8rr %bpl, %bpl, implicit-def %eflags
- JE_1 %bb.13.bb59, implicit killed %eflags
+ $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
+ TEST8rr $bpl, $bpl, implicit-def $eflags
+ JE_1 %bb.13.bb59, implicit killed $eflags
bb.2:
successors: %bb.12.bb51(0x80000000)
- liveins: %ebp
+ liveins: $ebp
- %xmm0 = XORPSrr undef %xmm0, undef %xmm0
- %ebx = IMPLICIT_DEF implicit-def %rbx
+ $xmm0 = XORPSrr undef $xmm0, undef $xmm0
+ $ebx = IMPLICIT_DEF implicit-def $rbx
JMP_1 %bb.12.bb51
bb.3.bb3:
successors: %bb.4.bb7(0x80000000)
- liveins: %rax
+ liveins: $rax
- MOV64mr %rsp, 1, %noreg, 32, %noreg, %rax :: (store 8 into %stack.5)
- %r12 = MOV64rr killed %rax
- %r12 = ADD64ri8 killed %r12, 16, implicit-def dead %eflags
- %xmm0 = XORPSrr undef %xmm0, undef %xmm0
- %esi = XOR32rr undef %esi, undef %esi, implicit-def dead %eflags
- %rax = MOV64ri %const.0
- %xmm1 = MOVSDrm killed %rax, 1, %noreg, 0, %noreg :: (load 8 from constant-pool)
- MOVSDmr %rsp, 1, %noreg, 40, %noreg, killed %xmm1 :: (store 8 into %stack.4)
- %eax = IMPLICIT_DEF
- %ecx = XOR32rr undef %ecx, undef %ecx, implicit-def dead %eflags
+ MOV64mr $rsp, 1, $noreg, 32, $noreg, $rax :: (store 8 into %stack.5)
+ $r12 = MOV64rr killed $rax
+ $r12 = ADD64ri8 killed $r12, 16, implicit-def dead $eflags
+ $xmm0 = XORPSrr undef $xmm0, undef $xmm0
+ $esi = XOR32rr undef $esi, undef $esi, implicit-def dead $eflags
+ $rax = MOV64ri %const.0
+ $xmm1 = MOVSDrm killed $rax, 1, $noreg, 0, $noreg :: (load 8 from constant-pool)
+ MOVSDmr $rsp, 1, $noreg, 40, $noreg, killed $xmm1 :: (store 8 into %stack.4)
+ $eax = IMPLICIT_DEF
+ $ecx = XOR32rr undef $ecx, undef $ecx, implicit-def dead $eflags
bb.4.bb7:
successors: %bb.6.bb26(0x40000000), %bb.5.bb15(0x40000000)
- liveins: %eax, %ecx, %esi, %r12, %xmm0
+ liveins: $eax, $ecx, $esi, $r12, $xmm0
- %ebp = MOV32rr killed %ecx
- %ebx = MOV32rr killed %eax, implicit-def %rbx
- %r14d = MOV32rr %ebx, implicit-def %r14
- TEST8rr %sil, %sil, implicit-def %eflags
- JNE_1 %bb.6.bb26, implicit %eflags
+ $ebp = MOV32rr killed $ecx
+ $ebx = MOV32rr killed $eax, implicit-def $rbx
+ $r14d = MOV32rr $ebx, implicit-def $r14
+ TEST8rr $sil, $sil, implicit-def $eflags
+ JNE_1 %bb.6.bb26, implicit $eflags
bb.5.bb15:
successors: %bb.6.bb26(0x80000000)
- liveins: %ebp, %rbx, %r14, %xmm0
+ liveins: $ebp, $rbx, $r14, $xmm0
- MOV32mr %rsp, 1, %noreg, 24, %noreg, %ebx :: (store 4 into %stack.0, align 8)
- MOV32mr %rsp, 1, %noreg, 16, %noreg, %ebp :: (store 4 into %stack.1, align 8)
- MOVSDmr %rsp, 1, %noreg, 8, %noreg, killed %xmm0 :: (store 8 into %stack.2)
- %rax = MOV64rm %rsp, 1, %noreg, 32, %noreg :: (load 8 from %stack.5)
- MOV64mr %rsp, 1, %noreg, 48, %noreg, killed %rax :: (store 8 into %stack.3)
- %rax = MOV64ri @wibble
- STATEPOINT 2882400000, 0, 0, killed %rax, 2, 0, 2, 0, 2, 30, 2, 1, 2, 0, 2, 99, 2, 0, 2, 12, 2, 0, 2, 10, 1, 8, %rsp, 24, 2, 10, 2, 0, 2, 10, 1, 8, %rsp, 16, 2, 10, 2, 4278124286, 2, 6, 2, 4278124286, 2, 7, 1, 8, %rsp, 8, 2, 99, 2, 0, 2, 7, 2, 4278124286, 2, 99, 2, 0, 2, 13, 1, 8, %rsp, 48, 2, 7, 2, 4278124286, 2, 99, 2, 0, csr_64, implicit-def %rsp :: (volatile load 8 from %stack.0), (volatile load 8 from %stack.1), (volatile load 8 from %stack.2), (volatile load 8 from %stack.3)
- %esi = XOR32rr undef %esi, undef %esi, implicit-def dead %eflags
- %r12 = IMPLICIT_DEF
+ MOV32mr $rsp, 1, $noreg, 24, $noreg, $ebx :: (store 4 into %stack.0, align 8)
+ MOV32mr $rsp, 1, $noreg, 16, $noreg, $ebp :: (store 4 into %stack.1, align 8)
+ MOVSDmr $rsp, 1, $noreg, 8, $noreg, killed $xmm0 :: (store 8 into %stack.2)
+ $rax = MOV64rm $rsp, 1, $noreg, 32, $noreg :: (load 8 from %stack.5)
+ MOV64mr $rsp, 1, $noreg, 48, $noreg, killed $rax :: (store 8 into %stack.3)
+ $rax = MOV64ri @wibble
+ STATEPOINT 2882400000, 0, 0, killed $rax, 2, 0, 2, 0, 2, 30, 2, 1, 2, 0, 2, 99, 2, 0, 2, 12, 2, 0, 2, 10, 1, 8, $rsp, 24, 2, 10, 2, 0, 2, 10, 1, 8, $rsp, 16, 2, 10, 2, 4278124286, 2, 6, 2, 4278124286, 2, 7, 1, 8, $rsp, 8, 2, 99, 2, 0, 2, 7, 2, 4278124286, 2, 99, 2, 0, 2, 13, 1, 8, $rsp, 48, 2, 7, 2, 4278124286, 2, 99, 2, 0, csr_64, implicit-def $rsp :: (volatile load 8 from %stack.0), (volatile load 8 from %stack.1), (volatile load 8 from %stack.2), (volatile load 8 from %stack.3)
+ $esi = XOR32rr undef $esi, undef $esi, implicit-def dead $eflags
+ $r12 = IMPLICIT_DEF
bb.6.bb26:
successors: %bb.8.bb37(0x40000000), %bb.7.bb35(0x40000000)
- liveins: %ebp, %esi, %rbx, %r12, %r14
+ liveins: $ebp, $esi, $rbx, $r12, $r14
- %rax = MOV64ri @global.1
- %rax = MOV64rm killed %rax, 1, %noreg, 0, %noreg :: (dereferenceable load 8 from @global.1)
- TEST64rr %rax, %rax, implicit-def %eflags
- %rax = CMOVE64rr undef %rax, killed %rax, implicit killed %eflags
- %ecx = MOV32rm undef %rax, 1, %noreg, 0, %noreg :: (load 4 from `i32* undef`)
- %rdx = MOV64rm %r12, 8, %r14, 0, %noreg :: (load 8 from %ir.tmp3)
- %r15 = LEA64r %rdx, 1, %noreg, 1, _
- MOV64mr %r12, 8, %r14, 0, %noreg, %r15 :: (store 8 into %ir.tmp3)
- %ecx = SUB32rr killed %ecx, %edx, implicit-def dead %eflags, implicit killed %rdx
- MOV32mr undef %rax, 1, %noreg, 0, %noreg, killed %ecx :: (store 4 into `i32* undef`)
- %r13 = MOV64rm killed %rax, 1, %noreg, 768, %noreg :: (load 8 from %ir.tmp33)
- TEST8rr %sil, %sil, implicit-def %eflags
- %rax = IMPLICIT_DEF
- JNE_1 %bb.8.bb37, implicit %eflags
+ $rax = MOV64ri @global.1
+ $rax = MOV64rm killed $rax, 1, $noreg, 0, $noreg :: (dereferenceable load 8 from @global.1)
+ TEST64rr $rax, $rax, implicit-def $eflags
+ $rax = CMOVE64rr undef $rax, killed $rax, implicit killed $eflags
+ $ecx = MOV32rm undef $rax, 1, $noreg, 0, $noreg :: (load 4 from `i32* undef`)
+ $rdx = MOV64rm $r12, 8, $r14, 0, $noreg :: (load 8 from %ir.tmp3)
+ $r15 = LEA64r $rdx, 1, $noreg, 1, _
+ MOV64mr $r12, 8, $r14, 0, $noreg, $r15 :: (store 8 into %ir.tmp3)
+ $ecx = SUB32rr killed $ecx, $edx, implicit-def dead $eflags, implicit killed $rdx
+ MOV32mr undef $rax, 1, $noreg, 0, $noreg, killed $ecx :: (store 4 into `i32* undef`)
+ $r13 = MOV64rm killed $rax, 1, $noreg, 768, $noreg :: (load 8 from %ir.tmp33)
+ TEST8rr $sil, $sil, implicit-def $eflags
+ $rax = IMPLICIT_DEF
+ JNE_1 %bb.8.bb37, implicit $eflags
bb.7.bb35:
successors: %bb.8.bb37(0x80000000)
- liveins: %ebp, %rbx, %r12, %r13, %r14, %r15
+ liveins: $ebp, $rbx, $r12, $r13, $r14, $r15
- %rsi = MOV64ri @global
- %rax = MOV64ri @ham
- CALL64r killed %rax, csr_64, implicit %rsp, implicit undef %rdi, implicit %rsi, implicit-def %rsp, implicit-def %rax
- %esi = XOR32rr undef %esi, undef %esi, implicit-def dead %eflags
+ $rsi = MOV64ri @global
+ $rax = MOV64ri @ham
+ CALL64r killed $rax, csr_64, implicit $rsp, implicit undef $rdi, implicit $rsi, implicit-def $rsp, implicit-def $rax
+ $esi = XOR32rr undef $esi, undef $esi, implicit-def dead $eflags
bb.8.bb37:
successors: %bb.9.bb37(0x40000000), %bb.10.bb37(0x40000000)
- liveins: %ebp, %esi, %rax, %rbx, %r12, %r13, %r14, %r15
+ liveins: $ebp, $esi, $rax, $rbx, $r12, $r13, $r14, $r15
- %rcx = MOV64rm killed %rax, 1, %noreg, 760, %noreg :: (load 8 from %ir.tmp40)
- CMP64rr %r13, %rcx, implicit-def %eflags
- JL_1 %bb.10.bb37, implicit %eflags
+ $rcx = MOV64rm killed $rax, 1, $noreg, 760, $noreg :: (load 8 from %ir.tmp40)
+ CMP64rr $r13, $rcx, implicit-def $eflags
+ JL_1 %bb.10.bb37, implicit $eflags
bb.9.bb37:
successors: %bb.10.bb37(0x80000000)
- liveins: %ebp, %esi, %rbx, %r12, %r13, %r14, %r15
+ liveins: $ebp, $esi, $rbx, $r12, $r13, $r14, $r15
- %cl = MOV8rr %r13b, implicit killed %r13, implicit-def %rcx
+ $cl = MOV8rr $r13b, implicit killed $r13, implicit-def $rcx
bb.10.bb37:
successors: %bb.11.bb51.loopexit(0x00000800), %bb.4.bb7(0x7ffff800)
- liveins: %ebp, %esi, %rbx, %rcx, %r12, %r14, %r15
+ liveins: $ebp, $esi, $rbx, $rcx, $r12, $r14, $r15
- %cl = KILL %cl, implicit killed %rcx
- %r15 = SAR64rCL killed %r15, implicit-def dead %eflags, implicit %cl
- MOV64mr %r12, 8, killed %r14, 0, %noreg, killed %r15 :: (store 8 into %ir.tmp7)
- MOV64mi32 undef %rax, 1, %noreg, 0, %noreg, 0 :: (store 8 into `i64* undef`)
- %eax = LEA64_32r %rbx, 1, %noreg, 1, _
- %ecx = MOV32ri 6
- CMP32ri %eax, 15141, implicit-def %eflags
- %xmm0 = MOVSDrm %rsp, 1, %noreg, 40, %noreg :: (load 8 from %stack.4)
- JL_1 %bb.4.bb7, implicit %eflags
+ $cl = KILL $cl, implicit killed $rcx
+ $r15 = SAR64rCL killed $r15, implicit-def dead $eflags, implicit $cl
+ MOV64mr $r12, 8, killed $r14, 0, $noreg, killed $r15 :: (store 8 into %ir.tmp7)
+ MOV64mi32 undef $rax, 1, $noreg, 0, $noreg, 0 :: (store 8 into `i64* undef`)
+ $eax = LEA64_32r $rbx, 1, $noreg, 1, _
+ $ecx = MOV32ri 6
+ CMP32ri $eax, 15141, implicit-def $eflags
+ $xmm0 = MOVSDrm $rsp, 1, $noreg, 40, $noreg :: (load 8 from %stack.4)
+ JL_1 %bb.4.bb7, implicit $eflags
bb.11.bb51.loopexit:
successors: %bb.12.bb51(0x80000000)
- liveins: %ebp, %rbx
+ liveins: $ebp, $rbx
- %ebp = INC32r killed %ebp, implicit-def dead %eflags
- %ebx = INC32r %ebx, implicit-def dead %eflags, implicit killed %rbx, implicit-def %rbx
- %rax = MOV64ri %const.0
- %xmm0 = MOVSDrm killed %rax, 1, %noreg, 0, %noreg :: (load 8 from constant-pool)
+ $ebp = INC32r killed $ebp, implicit-def dead $eflags
+ $ebx = INC32r $ebx, implicit-def dead $eflags, implicit killed $rbx, implicit-def $rbx
+ $rax = MOV64ri %const.0
+ $xmm0 = MOVSDrm killed $rax, 1, $noreg, 0, $noreg :: (load 8 from constant-pool)
bb.12.bb51:
- liveins: %ebp, %rbx, %xmm0
+ liveins: $ebp, $rbx, $xmm0
- MOV32mr %rsp, 1, %noreg, 24, %noreg, %ebx, implicit killed %rbx :: (store 4 into %stack.0, align 8)
- MOV32mr %rsp, 1, %noreg, 16, %noreg, killed %ebp :: (store 4 into %stack.1, align 8)
- MOVSDmr %rsp, 1, %noreg, 8, %noreg, killed %xmm0 :: (store 8 into %stack.2)
- %rax = MOV64ri @wobble
- %edi = MOV32ri -121
- STATEPOINT 2882400000, 0, 1, killed %rax, %edi, 2, 0, 2, 0, 2, 38, 2, 1, 2, 0, 2, 270, 2, 4, 2, 12, 2, 0, 2, 11, 2, 4278124286, 2, 99, 2, 0, 2, 10, 1, 8, %rsp, 24, 2, 6, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 0, 2, 10, 1, 8, %rsp, 16, 2, 10, 2, 4278124286, 2, 99, 2, 0, 2, 7, 1, 8, %rsp, 8, 2, 99, 2, 0, 2, 7, 2, 4278124286, 2, 99, 2, 0, 2, 13, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 0, csr_64, implicit-def %rsp :: (volatile load 8 from %stack.0), (volatile load 8 from %stack.1), (volatile load 8 from %stack.2)
+ MOV32mr $rsp, 1, $noreg, 24, $noreg, $ebx, implicit killed $rbx :: (store 4 into %stack.0, align 8)
+ MOV32mr $rsp, 1, $noreg, 16, $noreg, killed $ebp :: (store 4 into %stack.1, align 8)
+ MOVSDmr $rsp, 1, $noreg, 8, $noreg, killed $xmm0 :: (store 8 into %stack.2)
+ $rax = MOV64ri @wobble
+ $edi = MOV32ri -121
+ STATEPOINT 2882400000, 0, 1, killed $rax, $edi, 2, 0, 2, 0, 2, 38, 2, 1, 2, 0, 2, 270, 2, 4, 2, 12, 2, 0, 2, 11, 2, 4278124286, 2, 99, 2, 0, 2, 10, 1, 8, $rsp, 24, 2, 6, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 0, 2, 10, 1, 8, $rsp, 16, 2, 10, 2, 4278124286, 2, 99, 2, 0, 2, 7, 1, 8, $rsp, 8, 2, 99, 2, 0, 2, 7, 2, 4278124286, 2, 99, 2, 0, 2, 13, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 0, csr_64, implicit-def $rsp :: (volatile load 8 from %stack.0), (volatile load 8 from %stack.1), (volatile load 8 from %stack.2)
bb.13.bb59:
- %rax = MOV64ri @wobble
- %edi = MOV32ri 8
- STATEPOINT 2882400000, 0, 1, killed %rax, %edi, 2, 0, 2, 0, 2, 38, 2, 1, 2, 0, 2, 123, 2, 4, 2, 12, 2, 0, 2, 13, 2, 0, 2, 99, 2, 4278124286, 2, 13, 2, 0, 2, 10, 2, 4278124286, 2, 99, 2, 4278124286, 2, 99, 2, 4278124286, 2, 99, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 4278124286, 2, 99, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 4278124286, 2, 99, 2, 0, 2, 13, 2, 0, 2, 99, 2, 4278124286, 2, 99, 2, 0, csr_64, implicit-def %rsp
+ $rax = MOV64ri @wobble
+ $edi = MOV32ri 8
+ STATEPOINT 2882400000, 0, 1, killed $rax, $edi, 2, 0, 2, 0, 2, 38, 2, 1, 2, 0, 2, 123, 2, 4, 2, 12, 2, 0, 2, 13, 2, 0, 2, 99, 2, 4278124286, 2, 13, 2, 0, 2, 10, 2, 4278124286, 2, 99, 2, 4278124286, 2, 99, 2, 4278124286, 2, 99, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 4278124286, 2, 99, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 4278124286, 2, 99, 2, 0, 2, 13, 2, 0, 2, 99, 2, 4278124286, 2, 99, 2, 0, csr_64, implicit-def $rsp
...
diff --git a/llvm/test/CodeGen/X86/oddshuffles.ll b/llvm/test/CodeGen/X86/oddshuffles.ll
index 0f6bde6..1fd4e0b 100644
--- a/llvm/test/CodeGen/X86/oddshuffles.ll
+++ b/llvm/test/CodeGen/X86/oddshuffles.ll
@@ -31,7 +31,7 @@
;
; AVX2-LABEL: v3i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
; AVX2-NEXT: vpextrq $1, %xmm0, 16(%rdi)
@@ -66,7 +66,7 @@
;
; AVX2-LABEL: v3f64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3]
; AVX2-NEXT: vmovhpd %xmm0, 16(%rdi)
@@ -226,7 +226,7 @@
;
; AVX2-LABEL: v5i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,5,1,6,3,u,u,u>
; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1
@@ -276,7 +276,7 @@
;
; AVX2-LABEL: v5f32:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,5,1,6,3,u,u,u>
; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1
@@ -442,7 +442,7 @@
;
; AVX2-LABEL: v7i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,6,3,6,1,7,4,u>
; AVX2-NEXT: vpermps %ymm0, %ymm2, %ymm0
@@ -1802,7 +1802,7 @@
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; AVX1-NEXT: vmovaps %ymm1, 32(%rdi)
; AVX1-NEXT: vmovaps %ymm1, (%rdi)
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1821,7 +1821,7 @@
; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; XOP-NEXT: vmovaps %ymm1, 32(%rdi)
; XOP-NEXT: vmovaps %ymm1, (%rdi)
-; XOP-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; XOP-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
%shuffle = shufflevector <4 x double> %A, <4 x double> %A, <8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/X86/or-lea.ll b/llvm/test/CodeGen/X86/or-lea.ll
index 9447ceb..6237427 100644
--- a/llvm/test/CodeGen/X86/or-lea.ll
+++ b/llvm/test/CodeGen/X86/or-lea.ll
@@ -9,8 +9,8 @@
define i32 @or_shift1_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift1_and1:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: leal (%rsi,%rdi,2), %eax
; CHECK-NEXT: retq
@@ -24,8 +24,8 @@
define i32 @or_shift1_and1_swapped(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift1_and1_swapped:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: leal (%rsi,%rdi,2), %eax
; CHECK-NEXT: retq
@@ -39,8 +39,8 @@
define i32 @or_shift2_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift2_and1:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: leal (%rsi,%rdi,4), %eax
; CHECK-NEXT: retq
@@ -54,8 +54,8 @@
define i32 @or_shift3_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift3_and1:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: leal (%rsi,%rdi,8), %eax
; CHECK-NEXT: retq
@@ -69,8 +69,8 @@
define i32 @or_shift3_and7(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift3_and7:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: andl $7, %esi
; CHECK-NEXT: leal (%rsi,%rdi,8), %eax
; CHECK-NEXT: retq
@@ -86,8 +86,8 @@
define i32 @or_shift4_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift4_and1:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: shll $4, %edi
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: leal (%rsi,%rdi), %eax
@@ -104,7 +104,7 @@
define i32 @or_shift3_and8(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift3_and8:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: leal (,%rdi,8), %eax
; CHECK-NEXT: andl $8, %esi
; CHECK-NEXT: orl %esi, %eax
diff --git a/llvm/test/CodeGen/X86/patchpoint-verifiable.mir b/llvm/test/CodeGen/X86/patchpoint-verifiable.mir
index 119deff..54f39e1 100644
--- a/llvm/test/CodeGen/X86/patchpoint-verifiable.mir
+++ b/llvm/test/CodeGen/X86/patchpoint-verifiable.mir
@@ -17,8 +17,8 @@
name: small_patchpoint_codegen
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
frameInfo:
hasPatchPoint: true
stackSize: 8
@@ -28,15 +28,15 @@
- { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16 }
body: |
bb.0.entry:
- liveins: %rdi, %rsi, %rbp
+ liveins: $rdi, $rsi, $rbp
- frame-setup PUSH64r killed %rbp, implicit-def %rsp, implicit %rsp
+ frame-setup PUSH64r killed $rbp, implicit-def $rsp, implicit $rsp
CFI_INSTRUCTION def_cfa_offset 16
- CFI_INSTRUCTION offset %rbp, -16
- %rbp = frame-setup MOV64rr %rsp
- CFI_INSTRUCTION def_cfa_register %rbp
- ; CHECK: PATCHPOINT 5, 5, 0, 2, 0, %rdi, %rsi, csr_64, implicit-def dead early-clobber %r11, implicit-def %rsp, implicit-def dead %rax
- PATCHPOINT 5, 5, 0, 2, 0, %rdi, %rsi, csr_64, implicit-def dead early-clobber %r11, implicit-def %rsp, implicit-def dead %rax
- %rbp = POP64r implicit-def %rsp, implicit %rsp
+ CFI_INSTRUCTION offset $rbp, -16
+ $rbp = frame-setup MOV64rr $rsp
+ CFI_INSTRUCTION def_cfa_register $rbp
+ ; CHECK: PATCHPOINT 5, 5, 0, 2, 0, $rdi, $rsi, csr_64, implicit-def dead early-clobber $r11, implicit-def $rsp, implicit-def dead $rax
+ PATCHPOINT 5, 5, 0, 2, 0, $rdi, $rsi, csr_64, implicit-def dead early-clobber $r11, implicit-def $rsp, implicit-def dead $rax
+ $rbp = POP64r implicit-def $rsp, implicit $rsp
RETQ
...
diff --git a/llvm/test/CodeGen/X86/peephole-recurrence.mir b/llvm/test/CodeGen/X86/peephole-recurrence.mir
index 3fc8b2a..81363a0a 100644
--- a/llvm/test/CodeGen/X86/peephole-recurrence.mir
+++ b/llvm/test/CodeGen/X86/peephole-recurrence.mir
@@ -86,14 +86,14 @@
- { id: 11, class: gr32, preferred-register: '' }
- { id: 12, class: gr32, preferred-register: '' }
liveins:
- - { reg: '%edi', virtual-reg: '%4' }
+ - { reg: '$edi', virtual-reg: '%4' }
body: |
bb.0.bb0:
successors: %bb.1(0x80000000)
- liveins: %edi
+ liveins: $edi
- %4 = COPY %edi
- %5 = MOV32r0 implicit-def dead %eflags
+ %4 = COPY $edi
+ %5 = MOV32r0 implicit-def dead $eflags
bb.1.bb1:
successors: %bb.3(0x30000000), %bb.2(0x50000000)
@@ -101,8 +101,8 @@
; CHECK: %0:gr32 = PHI %5, %bb.0, %3, %bb.5
%0 = PHI %5, %bb.0, %3, %bb.5
%6 = MOV32ri 1
- TEST32rr %4, %4, implicit-def %eflags
- JE_1 %bb.3, implicit %eflags
+ TEST32rr %4, %4, implicit-def $eflags
+ JE_1 %bb.3, implicit $eflags
JMP_1 %bb.2
bb.2.bb3:
@@ -114,8 +114,8 @@
successors: %bb.5(0x30000000), %bb.4(0x50000000)
%1 = PHI %6, %bb.1, %7, %bb.2
- TEST32rr %1, %1, implicit-def %eflags
- JE_1 %bb.5, implicit %eflags
+ TEST32rr %1, %1, implicit-def $eflags
+ JE_1 %bb.5, implicit $eflags
JMP_1 %bb.4
bb.4.bb6:
@@ -127,22 +127,22 @@
successors: %bb.1(0x7c000000), %bb.6(0x04000000)
%2 = PHI %6, %bb.3, %9, %bb.4
- %10 = ADD32rr %1, %0, implicit-def dead %eflags
+ %10 = ADD32rr %1, %0, implicit-def dead $eflags
; CHECK: %10:gr32 = ADD32rr
; CHECK-SAME: %0,
; CHECK-SAME: %1,
- %3 = ADD32rr %2, killed %10, implicit-def dead %eflags
+ %3 = ADD32rr %2, killed %10, implicit-def dead $eflags
; CHECK: %3:gr32 = ADD32rr
; CHECK-SAME: %10,
; CHECK-SAME: %2,
- %11 = SUB32ri8 %3, 10, implicit-def %eflags
- JL_1 %bb.1, implicit %eflags
+ %11 = SUB32ri8 %3, 10, implicit-def $eflags
+ JL_1 %bb.1, implicit $eflags
JMP_1 %bb.6
bb.6.bb8:
- %12 = MOV32r0 implicit-def dead %eflags
- %eax = COPY %12
- RET 0, %eax
+ %12 = MOV32r0 implicit-def dead $eflags
+ $eax = COPY %12
+ RET 0, $eax
...
---
@@ -168,16 +168,16 @@
- { id: 12, class: gr32, preferred-register: '' }
- { id: 13, class: gr32, preferred-register: '' }
liveins:
- - { reg: '%edi', virtual-reg: '%4' }
- - { reg: '%rsi', virtual-reg: '%5' }
+ - { reg: '$edi', virtual-reg: '%4' }
+ - { reg: '$rsi', virtual-reg: '%5' }
body: |
bb.0.bb0:
successors: %bb.1(0x80000000)
- liveins: %edi, %rsi
+ liveins: $edi, $rsi
- %5 = COPY %rsi
- %4 = COPY %edi
- %6 = MOV32r0 implicit-def dead %eflags
+ %5 = COPY $rsi
+ %4 = COPY $edi
+ %6 = MOV32r0 implicit-def dead $eflags
bb.1.bb1:
successors: %bb.3(0x30000000), %bb.2(0x50000000)
@@ -185,8 +185,8 @@
%0 = PHI %6, %bb.0, %3, %bb.5
; CHECK: %0:gr32 = PHI %6, %bb.0, %3, %bb.5
%7 = MOV32ri 1
- TEST32rr %4, %4, implicit-def %eflags
- JE_1 %bb.3, implicit %eflags
+ TEST32rr %4, %4, implicit-def $eflags
+ JE_1 %bb.3, implicit $eflags
JMP_1 %bb.2
bb.2.bb3:
@@ -198,8 +198,8 @@
successors: %bb.5(0x30000000), %bb.4(0x50000000)
%1 = PHI %7, %bb.1, %8, %bb.2
- TEST32rr %1, %1, implicit-def %eflags
- JE_1 %bb.5, implicit %eflags
+ TEST32rr %1, %1, implicit-def $eflags
+ JE_1 %bb.5, implicit $eflags
JMP_1 %bb.4
bb.4.bb6:
@@ -211,22 +211,22 @@
successors: %bb.1(0x7c000000), %bb.6(0x04000000)
%2 = PHI %7, %bb.3, %10, %bb.4
- %11 = ADD32rr %1, %0, implicit-def dead %eflags
+ %11 = ADD32rr %1, %0, implicit-def dead $eflags
; CHECK: %11:gr32 = ADD32rr
; CHECK-SAME: %1,
; CHECK-SAME: %0,
- MOV32mr %5, 1, %noreg, 0, %noreg, %0 :: (store 4 into %ir.p)
- %3 = ADD32rr %2, killed %11, implicit-def dead %eflags
+ MOV32mr %5, 1, $noreg, 0, $noreg, %0 :: (store 4 into %ir.p)
+ %3 = ADD32rr %2, killed %11, implicit-def dead $eflags
; CHECK: %3:gr32 = ADD32rr
; CHECK-SAME: %2,
; CHECK-SAME: %11,
- %12 = SUB32ri8 %3, 10, implicit-def %eflags
- JL_1 %bb.1, implicit %eflags
+ %12 = SUB32ri8 %3, 10, implicit-def $eflags
+ JL_1 %bb.1, implicit $eflags
JMP_1 %bb.6
bb.6.bb8:
- %13 = MOV32r0 implicit-def dead %eflags
- %eax = COPY %13
- RET 0, %eax
+ %13 = MOV32r0 implicit-def dead $eflags
+ $eax = COPY %13
+ RET 0, $eax
...
diff --git a/llvm/test/CodeGen/X86/pmul.ll b/llvm/test/CodeGen/X86/pmul.ll
index 56cee30..26cba32 100644
--- a/llvm/test/CodeGen/X86/pmul.ll
+++ b/llvm/test/CodeGen/X86/pmul.ll
@@ -63,7 +63,7 @@
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
entry:
@@ -206,7 +206,7 @@
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
entry:
diff --git a/llvm/test/CodeGen/X86/popcnt-schedule.ll b/llvm/test/CodeGen/X86/popcnt-schedule.ll
index b6ee5a9..a5cdfca7 100644
--- a/llvm/test/CodeGen/X86/popcnt-schedule.ll
+++ b/llvm/test/CodeGen/X86/popcnt-schedule.ll
@@ -17,7 +17,7 @@
; GENERIC-NEXT: popcntw (%rsi), %cx # sched: [9:1.00]
; GENERIC-NEXT: popcntw %di, %ax # sched: [3:1.00]
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_ctpop_i16:
@@ -25,7 +25,7 @@
; SLM-NEXT: popcntw (%rsi), %cx # sched: [6:1.00]
; SLM-NEXT: popcntw %di, %ax # sched: [3:1.00]
; SLM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; SLM-NEXT: # kill: def %ax killed %ax killed %eax
+; SLM-NEXT: # kill: def $ax killed $ax killed $eax
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_ctpop_i16:
@@ -33,7 +33,7 @@
; SANDY-NEXT: popcntw (%rsi), %cx # sched: [9:1.00]
; SANDY-NEXT: popcntw %di, %ax # sched: [3:1.00]
; SANDY-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; SANDY-NEXT: # kill: def %ax killed %ax killed %eax
+; SANDY-NEXT: # kill: def $ax killed $ax killed $eax
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ctpop_i16:
@@ -41,7 +41,7 @@
; HASWELL-NEXT: popcntw (%rsi), %cx # sched: [8:1.00]
; HASWELL-NEXT: popcntw %di, %ax # sched: [3:1.00]
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; HASWELL-NEXT: # kill: def $ax killed $ax killed $eax
; HASWELL-NEXT: retq # sched: [7:1.00]
;
; BROADWELL-LABEL: test_ctpop_i16:
@@ -49,7 +49,7 @@
; BROADWELL-NEXT: popcntw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: popcntw %di, %ax # sched: [3:1.00]
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; BROADWELL-NEXT: # kill: def $ax killed $ax killed $eax
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_ctpop_i16:
@@ -57,7 +57,7 @@
; SKYLAKE-NEXT: popcntw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: popcntw %di, %ax # sched: [3:1.00]
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
+; SKYLAKE-NEXT: # kill: def $ax killed $ax killed $eax
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_ctpop_i16:
@@ -65,7 +65,7 @@
; BTVER2-NEXT: popcntw (%rsi), %cx # sched: [8:1.00]
; BTVER2-NEXT: popcntw %di, %ax # sched: [3:1.00]
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
+; BTVER2-NEXT: # kill: def $ax killed $ax killed $eax
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ctpop_i16:
@@ -73,7 +73,7 @@
; ZNVER1-NEXT: popcntw (%rsi), %cx # sched: [10:1.00]
; ZNVER1-NEXT: popcntw %di, %ax # sched: [3:1.00]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
+; ZNVER1-NEXT: # kill: def $ax killed $ax killed $eax
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a1
%2 = tail call i16 @llvm.ctpop.i16( i16 %1 )
diff --git a/llvm/test/CodeGen/X86/popcnt.ll b/llvm/test/CodeGen/X86/popcnt.ll
index d11a676..8fa8154 100644
--- a/llvm/test/CodeGen/X86/popcnt.ll
+++ b/llvm/test/CodeGen/X86/popcnt.ll
@@ -44,14 +44,14 @@
; X32-POPCNT: # %bb.0:
; X32-POPCNT-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-POPCNT-NEXT: popcntl %eax, %eax
-; X32-POPCNT-NEXT: # kill: def %al killed %al killed %eax
+; X32-POPCNT-NEXT: # kill: def $al killed $al killed $eax
; X32-POPCNT-NEXT: retl
;
; X64-POPCNT-LABEL: cnt8:
; X64-POPCNT: # %bb.0:
; X64-POPCNT-NEXT: movzbl %dil, %eax
; X64-POPCNT-NEXT: popcntl %eax, %eax
-; X64-POPCNT-NEXT: # kill: def %al killed %al killed %eax
+; X64-POPCNT-NEXT: # kill: def $al killed $al killed $eax
; X64-POPCNT-NEXT: retq
%cnt = tail call i8 @llvm.ctpop.i8(i8 %x)
ret i8 %cnt
@@ -78,7 +78,7 @@
; X32-NEXT: shll $8, %eax
; X32-NEXT: addl %ecx, %eax
; X32-NEXT: movzbl %ah, %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; X64-LABEL: cnt16:
@@ -100,7 +100,7 @@
; X64-NEXT: shll $8, %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: movzbl %ch, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
; X32-POPCNT-LABEL: cnt16:
diff --git a/llvm/test/CodeGen/X86/post-ra-sched-with-debug.mir b/llvm/test/CodeGen/X86/post-ra-sched-with-debug.mir
index 4132159..b87b4f6 100644
--- a/llvm/test/CodeGen/X86/post-ra-sched-with-debug.mir
+++ b/llvm/test/CodeGen/X86/post-ra-sched-with-debug.mir
@@ -237,91 +237,91 @@
name: _ZN1sC2Ei
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%esi' }
+ - { reg: '$rdi' }
+ - { reg: '$esi' }
fixedStack:
- - { id: 0, type: spill-slot, offset: -32, size: 8, alignment: 16, callee-saved-register: '%rbx' }
- - { id: 1, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '%r14' }
+ - { id: 0, type: spill-slot, offset: -32, size: 8, alignment: 16, callee-saved-register: '$rbx' }
+ - { id: 1, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '$r14' }
- { id: 2, type: spill-slot, offset: -16, size: 8, alignment: 16 }
stack:
- { id: 0, offset: -36, size: 4, alignment: 4 }
body: |
bb.0:
successors: %bb.3, %bb.2
- liveins: %esi, %rdi, %r14, %rbx, %rbp
+ liveins: $esi, $rdi, $r14, $rbx, $rbp
- ; CHECK: [[REGISTER:%r[a-z0-9]+]] = LEA64r {{%r[a-z0-9]+}}, 1, %noreg, -20, %noreg
- ; CHECK-NEXT: DBG_VALUE debug-use [[REGISTER]], debug-use %noreg, ![[J_VAR]], !DIExpression(), debug-location ![[J_LOC]]
- ; CHECK-NEXT: DBG_VALUE debug-use [[REGISTER]], debug-use %noreg, ![[I_VAR]], !DIExpression(), debug-location ![[I_LOC]]
+ ; CHECK: [[REGISTER:\$r[a-z0-9]+]] = LEA64r {{\$r[a-z0-9]+}}, 1, $noreg, -20, $noreg
+ ; CHECK-NEXT: DBG_VALUE debug-use [[REGISTER]], debug-use $noreg, ![[J_VAR]], !DIExpression(), debug-location ![[J_LOC]]
+ ; CHECK-NEXT: DBG_VALUE debug-use [[REGISTER]], debug-use $noreg, ![[I_VAR]], !DIExpression(), debug-location ![[I_LOC]]
- frame-setup PUSH64r killed %rbp, implicit-def %rsp, implicit %rsp
+ frame-setup PUSH64r killed $rbp, implicit-def $rsp, implicit $rsp
CFI_INSTRUCTION def_cfa_offset 16
- CFI_INSTRUCTION offset %rbp, -16
- %rbp = frame-setup MOV64rr %rsp
- CFI_INSTRUCTION def_cfa_register %rbp
- frame-setup PUSH64r killed %r14, implicit-def %rsp, implicit %rsp
- frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
- %rsp = frame-setup SUB64ri8 %rsp, 16, implicit-def dead %eflags
- CFI_INSTRUCTION offset %rbx, -32
- CFI_INSTRUCTION offset %r14, -24
- %r14d = MOV32rr %esi
- %rbx = MOV64rr %rdi
- CALL64pcrel32 @_ZN1lC2Ei, csr_64, implicit %rsp, implicit %rdi, implicit %esi, implicit-def %rsp
- %rdi = LEA64r %rbx, 1, %noreg, 8, %noreg
- DBG_VALUE debug-use %rdi, debug-use %noreg, !20, !17, debug-location !27
- DBG_VALUE debug-use %rdi, debug-use %noreg, !10, !17, debug-location !18
- %rax = MOV64rm %rbx, 1, %noreg, 16, %noreg :: (load 8)
- MOV64mr %rbx, 1, %noreg, 8, %noreg, killed %rax :: (store 8)
- MOV64mr %rbx, 1, %noreg, 24, %noreg, %rdi :: (store 8)
- %eax = MOV32ri -1
- %cl = MOV8rr %r14b, implicit killed %r14d
- %eax = SHL32rCL killed %eax, implicit-def dead %eflags, implicit %cl
- MOV32mr %rbx, 1, %noreg, 32, %noreg, %eax :: (store 4, align 8)
- MOV32mi %rbp, 1, %noreg, -20, %noreg, 0 :: (store 4)
- %rcx = MOV64rm %rbx, 1, %noreg, 8, %noreg :: (load 8)
- MOV64mr %rip, 1, %noreg, @n, %noreg, %rcx :: (store 8)
- %edx = XOR32rr undef %edx, undef %edx, implicit-def dead %eflags, implicit-def %rdx
- TEST64rr %rcx, %rcx, implicit-def %eflags
- %esi = MOV32ri @o, implicit-def %rsi
- %rsi = CMOVNE64rr killed %rsi, %rdx, implicit killed %eflags
- %rsi = OR64rr killed %rsi, killed %rcx, implicit-def %eflags
- %rcx = LEA64r %rbp, 1, %noreg, -20, %noreg
- DBG_VALUE debug-use %rcx, debug-use %noreg, !46, !17, debug-location !48
- DBG_VALUE debug-use %rcx, debug-use %noreg, !39, !17, debug-location !44
- DBG_VALUE %rbp, -20, !29, !17, debug-location !36
- %rcx = CMOVNE64rr killed %rcx, killed %rdx, implicit killed %eflags
- %rcx = OR64rr killed %rcx, killed %rsi, implicit-def dead %eflags
- %rdx = MOVSX64rm32 %rbx, 1, %noreg, 0, %noreg :: (load 4, align 8)
- TEST32mr killed %rcx, 4, killed %rdx, 0, %noreg, killed %eax, implicit-def %eflags :: (load 4)
- JNE_1 %bb.2, implicit %eflags
+ CFI_INSTRUCTION offset $rbp, -16
+ $rbp = frame-setup MOV64rr $rsp
+ CFI_INSTRUCTION def_cfa_register $rbp
+ frame-setup PUSH64r killed $r14, implicit-def $rsp, implicit $rsp
+ frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
+ $rsp = frame-setup SUB64ri8 $rsp, 16, implicit-def dead $eflags
+ CFI_INSTRUCTION offset $rbx, -32
+ CFI_INSTRUCTION offset $r14, -24
+ $r14d = MOV32rr $esi
+ $rbx = MOV64rr $rdi
+ CALL64pcrel32 @_ZN1lC2Ei, csr_64, implicit $rsp, implicit $rdi, implicit $esi, implicit-def $rsp
+ $rdi = LEA64r $rbx, 1, $noreg, 8, $noreg
+ DBG_VALUE debug-use $rdi, debug-use $noreg, !20, !17, debug-location !27
+ DBG_VALUE debug-use $rdi, debug-use $noreg, !10, !17, debug-location !18
+ $rax = MOV64rm $rbx, 1, $noreg, 16, $noreg :: (load 8)
+ MOV64mr $rbx, 1, $noreg, 8, $noreg, killed $rax :: (store 8)
+ MOV64mr $rbx, 1, $noreg, 24, $noreg, $rdi :: (store 8)
+ $eax = MOV32ri -1
+ $cl = MOV8rr $r14b, implicit killed $r14d
+ $eax = SHL32rCL killed $eax, implicit-def dead $eflags, implicit $cl
+ MOV32mr $rbx, 1, $noreg, 32, $noreg, $eax :: (store 4, align 8)
+ MOV32mi $rbp, 1, $noreg, -20, $noreg, 0 :: (store 4)
+ $rcx = MOV64rm $rbx, 1, $noreg, 8, $noreg :: (load 8)
+ MOV64mr $rip, 1, $noreg, @n, $noreg, $rcx :: (store 8)
+ $edx = XOR32rr undef $edx, undef $edx, implicit-def dead $eflags, implicit-def $rdx
+ TEST64rr $rcx, $rcx, implicit-def $eflags
+ $esi = MOV32ri @o, implicit-def $rsi
+ $rsi = CMOVNE64rr killed $rsi, $rdx, implicit killed $eflags
+ $rsi = OR64rr killed $rsi, killed $rcx, implicit-def $eflags
+ $rcx = LEA64r $rbp, 1, $noreg, -20, $noreg
+ DBG_VALUE debug-use $rcx, debug-use $noreg, !46, !17, debug-location !48
+ DBG_VALUE debug-use $rcx, debug-use $noreg, !39, !17, debug-location !44
+ DBG_VALUE $rbp, -20, !29, !17, debug-location !36
+ $rcx = CMOVNE64rr killed $rcx, killed $rdx, implicit killed $eflags
+ $rcx = OR64rr killed $rcx, killed $rsi, implicit-def dead $eflags
+ $rdx = MOVSX64rm32 $rbx, 1, $noreg, 0, $noreg :: (load 4, align 8)
+ TEST32mr killed $rcx, 4, killed $rdx, 0, $noreg, killed $eax, implicit-def $eflags :: (load 4)
+ JNE_1 %bb.2, implicit $eflags
JMP_1 %bb.3
bb.1:
successors: %bb.2
- liveins: %rbx, %rbp
+ liveins: $rbx, $rbp
- %rdi = MOV64rm %rbx, 1, %noreg, 24, %noreg :: (load 8)
+ $rdi = MOV64rm $rbx, 1, $noreg, 24, $noreg :: (load 8)
bb.2:
successors: %bb.1, %bb.3
- liveins: %rbx, %rbp, %rsp, %rdi
+ liveins: $rbx, $rbp, $rsp, $rdi
- CALL64pcrel32 @_ZN1p2aaEv, csr_64, implicit %rsp, implicit %rdi, implicit-def %rsp, implicit-def %eax
- %eax = KILL %eax, implicit-def %rax
- %ecx = LEA64_32r %rax, 1, %noreg, -1, %noreg, implicit-def %rcx
- %ecx = SHR32ri %ecx, 31, implicit-def dead %eflags, implicit killed %rcx, implicit-def %rcx
- %eax = LEA64_32r killed %rax, 1, killed %rcx, -1, %noreg
- %eax = SAR32r1 killed %eax, implicit-def dead %eflags
- CMP32mr %rbx, 1, %noreg, 0, %noreg, killed %eax, implicit-def %eflags :: (load 4, align 8), (load 4, align 8)
- JG_1 %bb.1, implicit killed %eflags
+ CALL64pcrel32 @_ZN1p2aaEv, csr_64, implicit $rsp, implicit $rdi, implicit-def $rsp, implicit-def $eax
+ $eax = KILL $eax, implicit-def $rax
+ $ecx = LEA64_32r $rax, 1, $noreg, -1, $noreg, implicit-def $rcx
+ $ecx = SHR32ri $ecx, 31, implicit-def dead $eflags, implicit killed $rcx, implicit-def $rcx
+ $eax = LEA64_32r killed $rax, 1, killed $rcx, -1, $noreg
+ $eax = SAR32r1 killed $eax, implicit-def dead $eflags
+ CMP32mr $rbx, 1, $noreg, 0, $noreg, killed $eax, implicit-def $eflags :: (load 4, align 8), (load 4, align 8)
+ JG_1 %bb.1, implicit killed $eflags
bb.3:
- liveins: %rbp
+ liveins: $rbp
- %rsp = ADD64ri8 %rsp, 16, implicit-def dead %eflags
- %rbx = POP64r implicit-def %rsp, implicit %rsp
- %r14 = POP64r implicit-def %rsp, implicit %rsp
- %rbp = POP64r implicit-def %rsp, implicit %rsp
+ $rsp = ADD64ri8 $rsp, 16, implicit-def dead $eflags
+ $rbx = POP64r implicit-def $rsp, implicit $rsp
+ $r14 = POP64r implicit-def $rsp, implicit $rsp
+ $rbp = POP64r implicit-def $rsp, implicit $rsp
RETQ
...
diff --git a/llvm/test/CodeGen/X86/pr22970.ll b/llvm/test/CodeGen/X86/pr22970.ll
index 4daa8d9..e909b76 100644
--- a/llvm/test/CodeGen/X86/pr22970.ll
+++ b/llvm/test/CodeGen/X86/pr22970.ll
@@ -13,7 +13,7 @@
;
; X64-LABEL: PR22970_i32:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
; X64-NEXT: andl $4095, %esi # imm = 0xFFF
; X64-NEXT: movl 32(%rdi,%rsi,4), %eax
; X64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/pr27681.mir b/llvm/test/CodeGen/X86/pr27681.mir
index 8e0296c..ecc5abf 100644
--- a/llvm/test/CodeGen/X86/pr27681.mir
+++ b/llvm/test/CodeGen/X86/pr27681.mir
@@ -15,69 +15,69 @@
frameInfo:
stackSize: 52
fixedStack:
- - { id: 0, type: spill-slot, offset: -20, size: 4, alignment: 4, callee-saved-register: '%esi' }
- - { id: 1, type: spill-slot, offset: -16, size: 4, alignment: 4, callee-saved-register: '%edi' }
- - { id: 2, type: spill-slot, offset: -12, size: 4, alignment: 4, callee-saved-register: '%ebx' }
- - { id: 3, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '%ebp' }
+ - { id: 0, type: spill-slot, offset: -20, size: 4, alignment: 4, callee-saved-register: '$esi' }
+ - { id: 1, type: spill-slot, offset: -16, size: 4, alignment: 4, callee-saved-register: '$edi' }
+ - { id: 2, type: spill-slot, offset: -12, size: 4, alignment: 4, callee-saved-register: '$ebx' }
+ - { id: 3, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '$ebp' }
stack:
- { id: 0, type: spill-slot, offset: -53, size: 1, alignment: 1 }
- { id: 1, type: spill-slot, offset: -48, size: 4, alignment: 4 }
- { id: 2, type: spill-slot, offset: -32, size: 4, alignment: 4 }
body: |
bb.0:
- liveins: %ebp, %ebx, %edi, %esi
+ liveins: $ebp, $ebx, $edi, $esi
- frame-setup PUSH32r killed %ebp, implicit-def %esp, implicit %esp
- frame-setup PUSH32r killed %ebx, implicit-def %esp, implicit %esp
- frame-setup PUSH32r killed %edi, implicit-def %esp, implicit %esp
- frame-setup PUSH32r killed %esi, implicit-def %esp, implicit %esp
- %esp = frame-setup SUB32ri8 %esp, 36, implicit-def dead %eflags
- %eax = MOV32ri 1
- %ebp = MOV32ri 2
- %ebx = MOV32ri 3
- %ecx = MOV32ri 4
- %edi = MOV32ri 5
- %edx = MOV32ri 6
+ frame-setup PUSH32r killed $ebp, implicit-def $esp, implicit $esp
+ frame-setup PUSH32r killed $ebx, implicit-def $esp, implicit $esp
+ frame-setup PUSH32r killed $edi, implicit-def $esp, implicit $esp
+ frame-setup PUSH32r killed $esi, implicit-def $esp, implicit $esp
+ $esp = frame-setup SUB32ri8 $esp, 36, implicit-def dead $eflags
+ $eax = MOV32ri 1
+ $ebp = MOV32ri 2
+ $ebx = MOV32ri 3
+ $ecx = MOV32ri 4
+ $edi = MOV32ri 5
+ $edx = MOV32ri 6
bb.1:
- liveins: %eax, %ebp, %ebx, %ecx, %edi, %edx
+ liveins: $eax, $ebp, $ebx, $ecx, $edi, $edx
- %ebp = SHR32rCL killed %ebp, implicit-def dead %eflags, implicit %cl
- %ebp = XOR32rr killed %ebp, killed %ebx, implicit-def dead %eflags
- TEST32rr %edx, %edx, implicit-def %eflags
- %cl = SETNEr implicit %eflags
- ; This %bl def is antidependent on the above use of %ebx
- %bl = MOV8rm %esp, 1, %noreg, 3, _ ; :: (load 1 from %stack.0)
- %cl = OR8rr killed %cl, %bl, implicit-def dead %eflags
- %esi = MOVZX32rr8 killed %cl
- %esi = ADD32rr killed %esi, killed %edi, implicit-def dead %eflags
- %ecx = MOV32rm %esp, 1, %noreg, 24, _ ; :: (load 4 from %stack.2)
- %edx = SAR32rCL killed %edx, implicit-def dead %eflags, implicit %cl
- TEST32rr killed %edx, %edx, implicit-def %eflags
- %cl = SETNEr implicit %eflags
- ; Verify that removal of the %bl antidependence does not use %ch
+ $ebp = SHR32rCL killed $ebp, implicit-def dead $eflags, implicit $cl
+ $ebp = XOR32rr killed $ebp, killed $ebx, implicit-def dead $eflags
+ TEST32rr $edx, $edx, implicit-def $eflags
+ $cl = SETNEr implicit $eflags
+ ; This %bl def is antidependent on the above use of $ebx
+ $bl = MOV8rm $esp, 1, $noreg, 3, _ ; :: (load 1 from %stack.0)
+ $cl = OR8rr killed $cl, $bl, implicit-def dead $eflags
+ $esi = MOVZX32rr8 killed $cl
+ $esi = ADD32rr killed $esi, killed $edi, implicit-def dead $eflags
+ $ecx = MOV32rm $esp, 1, $noreg, 24, _ ; :: (load 4 from %stack.2)
+ $edx = SAR32rCL killed $edx, implicit-def dead $eflags, implicit $cl
+ TEST32rr killed $edx, $edx, implicit-def $eflags
+ $cl = SETNEr implicit $eflags
+ ; Verify that removal of the $bl antidependence does not use $ch
; as a replacement register.
- ; CHECK: %cl = AND8rr killed %cl, killed %b
- %cl = AND8rr killed %cl, killed %bl, implicit-def dead %eflags
- CMP32ri8 %ebp, -1, implicit-def %eflags
- %edx = MOV32ri 0
- JE_1 %bb.3, implicit %eflags
+ ; CHECK: $cl = AND8rr killed $cl, killed $b
+ $cl = AND8rr killed $cl, killed $bl, implicit-def dead $eflags
+ CMP32ri8 $ebp, -1, implicit-def $eflags
+ $edx = MOV32ri 0
+ JE_1 %bb.3, implicit $eflags
bb.2:
- liveins: %cl, %eax, %ebp, %esi
+ liveins: $cl, $eax, $ebp, $esi
- OR32mr %esp, 1, %noreg, 8, %noreg, killed %eax, implicit-def %eflags ; :: (store 4 into %stack.1)
- %dl = SETNEr implicit %eflags, implicit-def %edx
+ OR32mr $esp, 1, $noreg, 8, $noreg, killed $eax, implicit-def $eflags ; :: (store 4 into %stack.1)
+ $dl = SETNEr implicit $eflags, implicit-def $edx
bb.3:
- liveins: %cl, %ebp, %edx, %esi
+ liveins: $cl, $ebp, $edx, $esi
- %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
- %esp = ADD32ri8 %esp, 36, implicit-def dead %eflags
- %esi = POP32r implicit-def %esp, implicit %esp
- %edi = POP32r implicit-def %esp, implicit %esp
- %ebx = POP32r implicit-def %esp, implicit %esp
- %ebp = POP32r implicit-def %esp, implicit %esp
- RET 0, %eax
+ $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags
+ $esp = ADD32ri8 $esp, 36, implicit-def dead $eflags
+ $esi = POP32r implicit-def $esp, implicit $esp
+ $edi = POP32r implicit-def $esp, implicit $esp
+ $ebx = POP32r implicit-def $esp, implicit $esp
+ $ebp = POP32r implicit-def $esp, implicit $esp
+ RET 0, $eax
...
diff --git a/llvm/test/CodeGen/X86/pr28173.ll b/llvm/test/CodeGen/X86/pr28173.ll
index f181217..4cb2567 100644
--- a/llvm/test/CodeGen/X86/pr28173.ll
+++ b/llvm/test/CodeGen/X86/pr28173.ll
@@ -27,7 +27,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: orl $65534, %eax # imm = 0xFFFE
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
br label %bb
@@ -45,7 +45,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: orl $2, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
br label %bb
diff --git a/llvm/test/CodeGen/X86/pr28560.ll b/llvm/test/CodeGen/X86/pr28560.ll
index d9da9ac..44964cc 100644
--- a/llvm/test/CodeGen/X86/pr28560.ll
+++ b/llvm/test/CodeGen/X86/pr28560.ll
@@ -1,6 +1,6 @@
; RUN: llc -mtriple=i686-pc-linux -print-after=postrapseudos < %s 2>&1 | FileCheck %s
-; CHECK: MOV8rr %{{[a-d]}}l, implicit killed %e[[R:[a-d]]]x, implicit-def %e[[R]]x
+; CHECK: MOV8rr ${{[a-d]}}l, implicit killed $e[[R:[a-d]]]x, implicit-def $e[[R]]x
define i32 @foo(i32 %i, i32 %k, i8* %p) {
%f = icmp ne i32 %i, %k
%s = zext i1 %f to i8
diff --git a/llvm/test/CodeGen/X86/pr29061.ll b/llvm/test/CodeGen/X86/pr29061.ll
index 9c29429..d16e453 100644
--- a/llvm/test/CodeGen/X86/pr29061.ll
+++ b/llvm/test/CodeGen/X86/pr29061.ll
@@ -11,7 +11,7 @@
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %edi, -8
; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %edi
-; CHECK-NEXT: # kill: def %di killed %di killed %edi
+; CHECK-NEXT: # kill: def $di killed $di killed $edi
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: popl %edi
@@ -28,7 +28,7 @@
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %esi, -8
; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %esi
-; CHECK-NEXT: # kill: def %si killed %si killed %esi
+; CHECK-NEXT: # kill: def $si killed $si killed $esi
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: popl %esi
diff --git a/llvm/test/CodeGen/X86/pr30430.ll b/llvm/test/CodeGen/X86/pr30430.ll
index 816fe23..30c2e59 100644
--- a/llvm/test/CodeGen/X86/pr30430.ll
+++ b/llvm/test/CodeGen/X86/pr30430.ll
@@ -73,7 +73,7 @@
; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
; CHECK-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
-; CHECK-NEXT: # implicit-def: %ymm2
+; CHECK-NEXT: # implicit-def: $ymm2
; CHECK-NEXT: vmovaps %xmm1, %xmm2
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm2
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -90,10 +90,10 @@
; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
; CHECK-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0]
-; CHECK-NEXT: # implicit-def: %ymm3
+; CHECK-NEXT: # implicit-def: $ymm3
; CHECK-NEXT: vmovaps %xmm1, %xmm3
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm3
-; CHECK-NEXT: # implicit-def: %zmm24
+; CHECK-NEXT: # implicit-def: $zmm24
; CHECK-NEXT: vmovaps %zmm3, %zmm24
; CHECK-NEXT: vinsertf64x4 $1, %ymm2, %zmm24, %zmm24
; CHECK-NEXT: vmovaps %zmm24, {{[0-9]+}}(%rsp)
diff --git a/llvm/test/CodeGen/X86/pr32282.ll b/llvm/test/CodeGen/X86/pr32282.ll
index 1c4d48d..6da2ae0 100644
--- a/llvm/test/CodeGen/X86/pr32282.ll
+++ b/llvm/test/CodeGen/X86/pr32282.ll
@@ -64,7 +64,7 @@
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: divl %ecx
-; X64-NEXT: # kill: def %eax killed %eax def %rax
+; X64-NEXT: # kill: def $eax killed $eax def $rax
; X64-NEXT: .LBB0_3:
; X64-NEXT: testq %rax, %rax
; X64-NEXT: setne -{{[0-9]+}}(%rsp)
diff --git a/llvm/test/CodeGen/X86/pr32284.ll b/llvm/test/CodeGen/X86/pr32284.ll
index 86bb7405..9368303 100644
--- a/llvm/test/CodeGen/X86/pr32284.ll
+++ b/llvm/test/CodeGen/X86/pr32284.ll
@@ -308,7 +308,7 @@
define void @f2() {
; X86-O0-LABEL: f2:
; X86-O0: # %bb.0: # %entry
-; X86-O0-NEXT: # implicit-def: %rax
+; X86-O0-NEXT: # implicit-def: $rax
; X86-O0-NEXT: movzbl var_7, %ecx
; X86-O0-NEXT: cmpb $0, var_7
; X86-O0-NEXT: setne %dl
@@ -361,7 +361,7 @@
; 686-O0-NEXT: .cfi_def_cfa_offset 14
; 686-O0-NEXT: .cfi_offset %esi, -12
; 686-O0-NEXT: .cfi_offset %edi, -8
-; 686-O0-NEXT: # implicit-def: %eax
+; 686-O0-NEXT: # implicit-def: $eax
; 686-O0-NEXT: movzbl var_7, %ecx
; 686-O0-NEXT: cmpb $0, var_7
; 686-O0-NEXT: setne %dl
diff --git a/llvm/test/CodeGen/X86/pr32329.ll b/llvm/test/CodeGen/X86/pr32329.ll
index f6c3b5c..38d3206 100644
--- a/llvm/test/CodeGen/X86/pr32329.ll
+++ b/llvm/test/CodeGen/X86/pr32329.ll
@@ -78,7 +78,7 @@
; X64-NEXT: imull %esi, %ecx
; X64-NEXT: addl $-1437483407, %ecx # imm = 0xAA51BE71
; X64-NEXT: movl $9, %edx
-; X64-NEXT: # kill: def %cl killed %cl killed %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shlq %cl, %rdx
; X64-NEXT: movq %rdx, {{.*}}(%rip)
; X64-NEXT: cmpl %eax, %esi
diff --git a/llvm/test/CodeGen/X86/pr32345.ll b/llvm/test/CodeGen/X86/pr32345.ll
index 99666c9..4c34d0a 100644
--- a/llvm/test/CodeGen/X86/pr32345.ll
+++ b/llvm/test/CodeGen/X86/pr32345.ll
@@ -10,7 +10,7 @@
define void @foo() {
; X640-LABEL: foo:
; X640: # %bb.0: # %bb
-; X640-NEXT: # implicit-def: %rax
+; X640-NEXT: # implicit-def: $rax
; X640-NEXT: movzwl var_22, %ecx
; X640-NEXT: movzwl var_27, %edx
; X640-NEXT: xorl %edx, %ecx
@@ -27,8 +27,8 @@
; X640-NEXT: movzwl var_27, %ecx
; X640-NEXT: subl $16610, %ecx # imm = 0x40E2
; X640-NEXT: movl %ecx, %ecx
-; X640-NEXT: # kill: def %rcx killed %ecx
-; X640-NEXT: # kill: def %cl killed %rcx
+; X640-NEXT: # kill: def $rcx killed $ecx
+; X640-NEXT: # kill: def $cl killed $rcx
; X640-NEXT: sarq %cl, %rsi
; X640-NEXT: movb %sil, %cl
; X640-NEXT: movb %cl, (%rax)
@@ -49,12 +49,12 @@
; 6860-NEXT: .cfi_offset %esi, -20
; 6860-NEXT: .cfi_offset %edi, -16
; 6860-NEXT: .cfi_offset %ebx, -12
-; 6860-NEXT: # implicit-def: %eax
+; 6860-NEXT: # implicit-def: $eax
; 6860-NEXT: movw var_22, %cx
; 6860-NEXT: movzwl var_27, %edx
; 6860-NEXT: movw %dx, %si
; 6860-NEXT: xorw %si, %cx
-; 6860-NEXT: # implicit-def: %edi
+; 6860-NEXT: # implicit-def: $edi
; 6860-NEXT: movw %cx, %di
; 6860-NEXT: xorl %edx, %edi
; 6860-NEXT: movw %di, %cx
@@ -65,7 +65,7 @@
; 6860-NEXT: movzwl var_27, %edx
; 6860-NEXT: movw %dx, %si
; 6860-NEXT: xorw %si, %cx
-; 6860-NEXT: # implicit-def: %edi
+; 6860-NEXT: # implicit-def: $edi
; 6860-NEXT: movw %cx, %di
; 6860-NEXT: xorl %edx, %edi
; 6860-NEXT: movw %di, %cx
@@ -104,7 +104,7 @@
; X64-NEXT: movzwl %ax, %eax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-NEXT: addl $-16610, %ecx # imm = 0xBF1E
-; X64-NEXT: # kill: def %cl killed %cl killed %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shrq %cl, %rax
; X64-NEXT: movb %al, (%rax)
; X64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/pr32484.ll b/llvm/test/CodeGen/X86/pr32484.ll
index dc67ec2..de28044 100644
--- a/llvm/test/CodeGen/X86/pr32484.ll
+++ b/llvm/test/CodeGen/X86/pr32484.ll
@@ -4,10 +4,10 @@
define void @foo() {
; CHECK-LABEL: foo:
; CHECK: # %bb.0:
-; CHECK-NEXT: # implicit-def: %rax
+; CHECK-NEXT: # implicit-def: $rax
; CHECK-NEXT: jmpq *%rax
; CHECK-NEXT: .LBB0_1:
-; CHECK-NEXT: # implicit-def: %rax
+; CHECK-NEXT: # implicit-def: $rax
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-NEXT: movdqu %xmm1, (%rax)
diff --git a/llvm/test/CodeGen/X86/pr34592.ll b/llvm/test/CodeGen/X86/pr34592.ll
index ff0a385..65dcb97 100644
--- a/llvm/test/CodeGen/X86/pr34592.ll
+++ b/llvm/test/CodeGen/X86/pr34592.ll
@@ -31,11 +31,11 @@
; CHECK-NEXT: vpalignr {{.*#+}} ymm8 = ymm0[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm0[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
; CHECK-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,2,0]
; CHECK-NEXT: vmovaps %xmm6, %xmm9
-; CHECK-NEXT: # implicit-def: %ymm11
+; CHECK-NEXT: # implicit-def: $ymm11
; CHECK-NEXT: vinserti128 $1, %xmm9, %ymm11, %ymm11
; CHECK-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5],ymm8[6,7]
; CHECK-NEXT: vmovaps %xmm0, %xmm9
-; CHECK-NEXT: # implicit-def: %ymm0
+; CHECK-NEXT: # implicit-def: $ymm0
; CHECK-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm0
; CHECK-NEXT: vpunpcklqdq {{.*#+}} ymm11 = ymm7[0],ymm2[0],ymm7[2],ymm2[2]
; CHECK-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,1,2,3]
diff --git a/llvm/test/CodeGen/X86/pr34653.ll b/llvm/test/CodeGen/X86/pr34653.ll
index 990cd9a..2edad66 100644
--- a/llvm/test/CodeGen/X86/pr34653.ll
+++ b/llvm/test/CodeGen/X86/pr34653.ll
@@ -64,7 +64,7 @@
; CHECK-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm11 = xmm11[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm13 = xmm13[1,0]
-; CHECK-NEXT: # kill: def %ymm10 killed %ymm10 killed %zmm10
+; CHECK-NEXT: # kill: def $ymm10 killed $ymm10 killed $zmm10
; CHECK-NEXT: vextractf128 $1, %ymm10, %xmm10
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm10, %xmm0
@@ -75,7 +75,7 @@
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; CHECK-NEXT: # kill: def %ymm9 killed %ymm9 killed %zmm9
+; CHECK-NEXT: # kill: def $ymm9 killed $ymm9 killed $zmm9
; CHECK-NEXT: vextractf128 $1, %ymm9, %xmm9
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm9, %xmm0
@@ -88,7 +88,7 @@
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; CHECK-NEXT: # kill: def %ymm8 killed %ymm8 killed %zmm8
+; CHECK-NEXT: # kill: def $ymm8 killed $ymm8 killed $zmm8
; CHECK-NEXT: vextractf128 $1, %ymm8, %xmm8
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm8, %xmm0
@@ -101,7 +101,7 @@
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; CHECK-NEXT: # kill: def %ymm7 killed %ymm7 killed %zmm7
+; CHECK-NEXT: # kill: def $ymm7 killed $ymm7 killed $zmm7
; CHECK-NEXT: vextractf128 $1, %ymm7, %xmm7
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm7, %xmm0
diff --git a/llvm/test/CodeGen/X86/pr35765.ll b/llvm/test/CodeGen/X86/pr35765.ll
index 4d09745..6ff504d 100644
--- a/llvm/test/CodeGen/X86/pr35765.ll
+++ b/llvm/test/CodeGen/X86/pr35765.ll
@@ -12,7 +12,7 @@
; CHECK-NEXT: movzwl {{.*}}(%rip), %ecx
; CHECK-NEXT: addl $-1398, %ecx # imm = 0xFA8A
; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def %cl killed %cl killed %ecx
+; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
; CHECK-NEXT: shll %cl, %eax
; CHECK-NEXT: movzwl {{.*}}(%rip), %ecx
; CHECK-NEXT: movzwl {{.*}}(%rip), %edx
diff --git a/llvm/test/CodeGen/X86/pre-coalesce.mir b/llvm/test/CodeGen/X86/pre-coalesce.mir
index fb9429b..211b3a4 100644
--- a/llvm/test/CodeGen/X86/pre-coalesce.mir
+++ b/llvm/test/CodeGen/X86/pre-coalesce.mir
@@ -83,11 +83,11 @@
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- %0 = MOV64rm %rip, 1, %noreg, @b, %noreg :: (dereferenceable load 8 from @b)
- %12 = MOV8rm %0, 1, %noreg, 0, %noreg :: (load 1 from %ir.t0)
- TEST8rr %12, %12, implicit-def %eflags
- %11 = MOV32rm %rip, 1, %noreg, @a, %noreg :: (dereferenceable load 4 from @a)
- JNE_1 %bb.1, implicit killed %eflags
+ %0 = MOV64rm $rip, 1, $noreg, @b, $noreg :: (dereferenceable load 8 from @b)
+ %12 = MOV8rm %0, 1, $noreg, 0, $noreg :: (load 1 from %ir.t0)
+ TEST8rr %12, %12, implicit-def $eflags
+ %11 = MOV32rm $rip, 1, $noreg, @a, $noreg :: (dereferenceable load 4 from @a)
+ JNE_1 %bb.1, implicit killed $eflags
bb.4:
%10 = COPY %11
@@ -98,18 +98,18 @@
bb.2.while.body:
%8 = MOVSX32rr8 %12
%10 = COPY %11
- %10 = SHL32ri %10, 5, implicit-def dead %eflags
- %10 = ADD32rr %10, %11, implicit-def dead %eflags
- %10 = ADD32rr %10, %8, implicit-def dead %eflags
- MOV32mr %rip, 1, %noreg, @a, %noreg, %10 :: (store 4 into @a)
- %12 = MOV8rm %0, 1, %noreg, 0, %noreg :: (load 1 from %ir.t0)
- TEST8rr %12, %12, implicit-def %eflags
+ %10 = SHL32ri %10, 5, implicit-def dead $eflags
+ %10 = ADD32rr %10, %11, implicit-def dead $eflags
+ %10 = ADD32rr %10, %8, implicit-def dead $eflags
+ MOV32mr $rip, 1, $noreg, @a, $noreg, %10 :: (store 4 into @a)
+ %12 = MOV8rm %0, 1, $noreg, 0, $noreg :: (load 1 from %ir.t0)
+ TEST8rr %12, %12, implicit-def $eflags
%11 = COPY %10
- JNE_1 %bb.2, implicit killed %eflags
+ JNE_1 %bb.2, implicit killed $eflags
JMP_1 %bb.3
bb.3.while.end:
- %eax = COPY %10
- RET 0, killed %eax
+ $eax = COPY %10
+ RET 0, killed $eax
...
diff --git a/llvm/test/CodeGen/X86/prefer-avx256-mask-extend.ll b/llvm/test/CodeGen/X86/prefer-avx256-mask-extend.ll
index 9f5b888..e78c1d2 100644
--- a/llvm/test/CodeGen/X86/prefer-avx256-mask-extend.ll
+++ b/llvm/test/CodeGen/X86/prefer-avx256-mask-extend.ll
@@ -30,7 +30,7 @@
; AVX512F-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: vpcmpeqd (%rdi), %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
%in = load <8 x i32>, <8 x i32>* %p
diff --git a/llvm/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll b/llvm/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
index 18b3b89..4f598e8 100644
--- a/llvm/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
+++ b/llvm/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
@@ -113,7 +113,7 @@
; AVX512BW-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vptestmd %zmm2, %zmm2, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -221,14 +221,14 @@
;
; AVX512BW-LABEL: shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vptestnmb %zmm0, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0,3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0]
; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
%cmp = icmp eq <32 x i8> %a, zeroinitializer
%b = shufflevector <32 x i1> %cmp, <32 x i1> undef, <32 x i32> <i32 3, i32 6, i32 22, i32 12, i32 3, i32 7, i32 7, i32 0, i32 3, i32 6, i32 1, i32 13, i32 3, i32 21, i32 7, i32 0, i32 3, i32 6, i32 22, i32 12, i32 3, i32 7, i32 7, i32 0, i32 3, i32 6, i32 1, i32 13, i32 3, i32 21, i32 7, i32 0>
diff --git a/llvm/test/CodeGen/X86/prefer-avx256-popcnt.ll b/llvm/test/CodeGen/X86/prefer-avx256-popcnt.ll
index dcfe053..06d4b6c 100644
--- a/llvm/test/CodeGen/X86/prefer-avx256-popcnt.ll
+++ b/llvm/test/CodeGen/X86/prefer-avx256-popcnt.ll
@@ -26,7 +26,7 @@
; AVX512F-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512F-NEXT: vpopcntd %zmm0, %zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
%out = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %in)
diff --git a/llvm/test/CodeGen/X86/prefer-avx256-shift.ll b/llvm/test/CodeGen/X86/prefer-avx256-shift.ll
index 1022007..3f81fd2 100644
--- a/llvm/test/CodeGen/X86/prefer-avx256-shift.ll
+++ b/llvm/test/CodeGen/X86/prefer-avx256-shift.ll
@@ -83,10 +83,10 @@
;
; AVX512BWNOVL-LABEL: var_shl_v16i16:
; AVX512BWNOVL: # %bb.0:
-; AVX512BWNOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512BWNOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BWNOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BWNOVL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BWNOVL-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512BWNOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BWNOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BWNOVL-NEXT: retq
%shift = shl <16 x i16> %a, %b
ret <16 x i16> %shift
@@ -141,7 +141,7 @@
; AVX512BWNOVL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BWNOVL-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
; AVX512BWNOVL-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BWNOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BWNOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BWNOVL-NEXT: vzeroupper
; AVX512BWNOVL-NEXT: retq
%shift = shl <16 x i8> %a, %b
@@ -227,10 +227,10 @@
;
; AVX512BWNOVL-LABEL: var_lshr_v16i16:
; AVX512BWNOVL: # %bb.0:
-; AVX512BWNOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512BWNOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BWNOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BWNOVL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BWNOVL-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BWNOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BWNOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BWNOVL-NEXT: retq
%shift = lshr <16 x i16> %a, %b
ret <16 x i16> %shift
@@ -286,7 +286,7 @@
; AVX512BWNOVL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BWNOVL-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512BWNOVL-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BWNOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BWNOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BWNOVL-NEXT: vzeroupper
; AVX512BWNOVL-NEXT: retq
%shift = lshr <16 x i8> %a, %b
@@ -396,10 +396,10 @@
;
; AVX512BWNOVL-LABEL: var_ashr_v16i16:
; AVX512BWNOVL: # %bb.0:
-; AVX512BWNOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512BWNOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BWNOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BWNOVL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BWNOVL-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BWNOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BWNOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BWNOVL-NEXT: retq
%shift = ashr <16 x i16> %a, %b
ret <16 x i16> %shift
@@ -467,7 +467,7 @@
; AVX512BWNOVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BWNOVL-NEXT: vpsravw %zmm1, %zmm0, %zmm0
; AVX512BWNOVL-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BWNOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BWNOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BWNOVL-NEXT: vzeroupper
; AVX512BWNOVL-NEXT: retq
%shift = ashr <16 x i8> %a, %b
diff --git a/llvm/test/CodeGen/X86/prefer-avx256-trunc.ll b/llvm/test/CodeGen/X86/prefer-avx256-trunc.ll
index b70fda2..447e6d1 100644
--- a/llvm/test/CodeGen/X86/prefer-avx256-trunc.ll
+++ b/llvm/test/CodeGen/X86/prefer-avx256-trunc.ll
@@ -28,9 +28,9 @@
;
; AVX512BW-LABEL: testv16i16_trunc_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/promote-vec3.ll b/llvm/test/CodeGen/X86/promote-vec3.ll
index 2719b8b..25e9f9f 100644
--- a/llvm/test/CodeGen/X86/promote-vec3.ll
+++ b/llvm/test/CodeGen/X86/promote-vec3.ll
@@ -17,9 +17,9 @@
; SSE3-NEXT: pextrw $0, %xmm0, %eax
; SSE3-NEXT: pextrw $1, %xmm0, %edx
; SSE3-NEXT: pextrw $2, %xmm0, %ecx
-; SSE3-NEXT: # kill: def %ax killed %ax killed %eax
-; SSE3-NEXT: # kill: def %dx killed %dx killed %edx
-; SSE3-NEXT: # kill: def %cx killed %cx killed %ecx
+; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-NEXT: # kill: def $dx killed $dx killed $edx
+; SSE3-NEXT: # kill: def $cx killed $cx killed $ecx
; SSE3-NEXT: retl
;
; SSE41-LABEL: zext_i8:
@@ -31,9 +31,9 @@
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: pextrw $2, %xmm0, %edx
; SSE41-NEXT: pextrw $4, %xmm0, %ecx
-; SSE41-NEXT: # kill: def %ax killed %ax killed %eax
-; SSE41-NEXT: # kill: def %dx killed %dx killed %edx
-; SSE41-NEXT: # kill: def %cx killed %cx killed %ecx
+; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE41-NEXT: # kill: def $dx killed $dx killed $edx
+; SSE41-NEXT: # kill: def $cx killed $cx killed $ecx
; SSE41-NEXT: retl
;
; AVX-32-LABEL: zext_i8:
@@ -45,9 +45,9 @@
; AVX-32-NEXT: vmovd %xmm0, %eax
; AVX-32-NEXT: vpextrw $2, %xmm0, %edx
; AVX-32-NEXT: vpextrw $4, %xmm0, %ecx
-; AVX-32-NEXT: # kill: def %ax killed %ax killed %eax
-; AVX-32-NEXT: # kill: def %dx killed %dx killed %edx
-; AVX-32-NEXT: # kill: def %cx killed %cx killed %ecx
+; AVX-32-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-32-NEXT: # kill: def $dx killed $dx killed $edx
+; AVX-32-NEXT: # kill: def $cx killed $cx killed $ecx
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: zext_i8:
@@ -59,9 +59,9 @@
; AVX-64-NEXT: vmovd %xmm0, %eax
; AVX-64-NEXT: vpextrw $2, %xmm0, %edx
; AVX-64-NEXT: vpextrw $4, %xmm0, %ecx
-; AVX-64-NEXT: # kill: def %ax killed %ax killed %eax
-; AVX-64-NEXT: # kill: def %dx killed %dx killed %edx
-; AVX-64-NEXT: # kill: def %cx killed %cx killed %ecx
+; AVX-64-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-64-NEXT: # kill: def $dx killed $dx killed $edx
+; AVX-64-NEXT: # kill: def $cx killed $cx killed $ecx
; AVX-64-NEXT: retq
%2 = zext <3 x i8> %0 to <3 x i16>
ret <3 x i16> %2
@@ -83,9 +83,9 @@
; SSE3-NEXT: movd %xmm0, %eax
; SSE3-NEXT: pextrw $2, %xmm0, %edx
; SSE3-NEXT: pextrw $4, %xmm0, %ecx
-; SSE3-NEXT: # kill: def %ax killed %ax killed %eax
-; SSE3-NEXT: # kill: def %dx killed %dx killed %edx
-; SSE3-NEXT: # kill: def %cx killed %cx killed %ecx
+; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-NEXT: # kill: def $dx killed $dx killed $edx
+; SSE3-NEXT: # kill: def $cx killed $cx killed $ecx
; SSE3-NEXT: retl
;
; SSE41-LABEL: sext_i8:
@@ -98,9 +98,9 @@
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: pextrw $2, %xmm0, %edx
; SSE41-NEXT: pextrw $4, %xmm0, %ecx
-; SSE41-NEXT: # kill: def %ax killed %ax killed %eax
-; SSE41-NEXT: # kill: def %dx killed %dx killed %edx
-; SSE41-NEXT: # kill: def %cx killed %cx killed %ecx
+; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE41-NEXT: # kill: def $dx killed $dx killed $edx
+; SSE41-NEXT: # kill: def $cx killed $cx killed $ecx
; SSE41-NEXT: retl
;
; AVX-32-LABEL: sext_i8:
@@ -113,9 +113,9 @@
; AVX-32-NEXT: vmovd %xmm0, %eax
; AVX-32-NEXT: vpextrw $2, %xmm0, %edx
; AVX-32-NEXT: vpextrw $4, %xmm0, %ecx
-; AVX-32-NEXT: # kill: def %ax killed %ax killed %eax
-; AVX-32-NEXT: # kill: def %dx killed %dx killed %edx
-; AVX-32-NEXT: # kill: def %cx killed %cx killed %ecx
+; AVX-32-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-32-NEXT: # kill: def $dx killed $dx killed $edx
+; AVX-32-NEXT: # kill: def $cx killed $cx killed $ecx
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: sext_i8:
@@ -128,9 +128,9 @@
; AVX-64-NEXT: vmovd %xmm0, %eax
; AVX-64-NEXT: vpextrw $2, %xmm0, %edx
; AVX-64-NEXT: vpextrw $4, %xmm0, %ecx
-; AVX-64-NEXT: # kill: def %ax killed %ax killed %eax
-; AVX-64-NEXT: # kill: def %dx killed %dx killed %edx
-; AVX-64-NEXT: # kill: def %cx killed %cx killed %ecx
+; AVX-64-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-64-NEXT: # kill: def $dx killed $dx killed $edx
+; AVX-64-NEXT: # kill: def $cx killed $cx killed $ecx
; AVX-64-NEXT: retq
%2 = sext <3 x i8> %0 to <3 x i16>
ret <3 x i16> %2
diff --git a/llvm/test/CodeGen/X86/psubus.ll b/llvm/test/CodeGen/X86/psubus.ll
index 71dff9f..d4bba1d 100644
--- a/llvm/test/CodeGen/X86/psubus.ll
+++ b/llvm/test/CodeGen/X86/psubus.ll
@@ -1896,7 +1896,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -1922,7 +1922,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/rdpid-schedule.ll b/llvm/test/CodeGen/X86/rdpid-schedule.ll
index 99042f4..53816fe 100644
--- a/llvm/test/CodeGen/X86/rdpid-schedule.ll
+++ b/llvm/test/CodeGen/X86/rdpid-schedule.ll
@@ -6,13 +6,13 @@
; GENERIC-LABEL: test_rdpid:
; GENERIC: # %bb.0:
; GENERIC-NEXT: rdpid %rax # sched: [100:0.33]
-; GENERIC-NEXT: # kill: def %eax killed %eax killed %rax
+; GENERIC-NEXT: # kill: def $eax killed $eax killed $rax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ICELAKE-LABEL: test_rdpid:
; ICELAKE: # %bb.0:
; ICELAKE-NEXT: rdpid %rax # sched: [100:0.25]
-; ICELAKE-NEXT: # kill: def %eax killed %eax killed %rax
+; ICELAKE-NEXT: # kill: def $eax killed $eax killed $rax
; ICELAKE-NEXT: retq # sched: [7:1.00]
%1 = tail call i32 @llvm.x86.rdpid()
ret i32 %1
diff --git a/llvm/test/CodeGen/X86/rdpid.ll b/llvm/test/CodeGen/X86/rdpid.ll
index 7eafb6c..ee44495 100644
--- a/llvm/test/CodeGen/X86/rdpid.ll
+++ b/llvm/test/CodeGen/X86/rdpid.ll
@@ -6,7 +6,7 @@
; X86-64-LABEL: test_builtin_rdpid:
; X86-64: # %bb.0:
; X86-64-NEXT: rdpid %rax
-; X86-64-NEXT: # kill: def %eax killed %eax killed %rax
+; X86-64-NEXT: # kill: def $eax killed $eax killed $rax
; X86-64-NEXT: retq
;
; X86-LABEL: test_builtin_rdpid:
diff --git a/llvm/test/CodeGen/X86/reduce-trunc-shl.ll b/llvm/test/CodeGen/X86/reduce-trunc-shl.ll
index 90fc282..08807b7 100644
--- a/llvm/test/CodeGen/X86/reduce-trunc-shl.ll
+++ b/llvm/test/CodeGen/X86/reduce-trunc-shl.ll
@@ -43,7 +43,7 @@
; AVX2-NEXT: vpslld $17, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
%shl = shl <8 x i32> %a, <i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17>
diff --git a/llvm/test/CodeGen/X86/regalloc-advanced-split-cost.ll b/llvm/test/CodeGen/X86/regalloc-advanced-split-cost.ll
index b3f4fad..4c50288 100644
--- a/llvm/test/CodeGen/X86/regalloc-advanced-split-cost.ll
+++ b/llvm/test/CodeGen/X86/regalloc-advanced-split-cost.ll
@@ -15,15 +15,15 @@
; Make sure the split behaves as expected
; CHECK: RS_Split Cascade 1
-; CHECK-NOT: %eax static =
-; CHECK: %eax no positive bundles
-; CHECK-NEXT: %ecx no positive bundles
-; CHECK-NEXT: %edx no positive bundles
-; CHECK-NEXT: %esi static =
-; CHECK-NEXT: %edi no positive bundles
-; CHECK-NEXT: %ebx no positive bundles
-; CHECK-NEXT: %ebp static =
-; CHECK: Split for %ebp
+; CHECK-NOT: $eax static =
+; CHECK: $eax no positive bundles
+; CHECK-NEXT: $ecx no positive bundles
+; CHECK-NEXT: $edx no positive bundles
+; CHECK-NEXT: $esi static =
+; CHECK-NEXT: $edi no positive bundles
+; CHECK-NEXT: $ebx no positive bundles
+; CHECK-NEXT: $ebp static =
+; CHECK: Split for $ebp
; Function Attrs: nounwind
define i32 @foo(i32* %array, i32 %cond1, i32 %val) local_unnamed_addr #0 {
diff --git a/llvm/test/CodeGen/X86/remat-phys-dead.ll b/llvm/test/CodeGen/X86/remat-phys-dead.ll
index 90bbe20..acf2441 100644
--- a/llvm/test/CodeGen/X86/remat-phys-dead.ll
+++ b/llvm/test/CodeGen/X86/remat-phys-dead.ll
@@ -9,7 +9,7 @@
define i8 @test_remat() {
ret i8 0
; CHECK: REGISTER COALESCING
-; CHECK: Remat: dead %eax = MOV32r0 implicit-def dead %eflags, implicit-def %al
+; CHECK: Remat: dead $eax = MOV32r0 implicit-def dead $eflags, implicit-def $al
}
; On the other hand, if it's already the correct width, we really shouldn't be
@@ -18,6 +18,6 @@
define i32 @test_remat32() {
ret i32 0
; CHECK: REGISTER COALESCING
-; CHECK: Remat: %eax = MOV32r0 implicit-def dead %eflags
+; CHECK: Remat: $eax = MOV32r0 implicit-def dead $eflags
}
diff --git a/llvm/test/CodeGen/X86/sar_fold64.ll b/llvm/test/CodeGen/X86/sar_fold64.ll
index 2c6229a..29eaee4 100644
--- a/llvm/test/CodeGen/X86/sar_fold64.ll
+++ b/llvm/test/CodeGen/X86/sar_fold64.ll
@@ -6,7 +6,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: movswq %di, %rax
; CHECK-NEXT: addl %eax, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax killed %rax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-NEXT: retq
%1 = shl i64 %a, 48
%2 = ashr exact i64 %1, 47
@@ -19,7 +19,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: movswq %di, %rax
; CHECK-NEXT: shrq %rax
-; CHECK-NEXT: # kill: def %eax killed %eax killed %rax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-NEXT: retq
%1 = shl i64 %a, 48
%2 = ashr exact i64 %1, 49
@@ -32,7 +32,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: movsbq %dil, %rax
; CHECK-NEXT: addl %eax, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax killed %rax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-NEXT: retq
%1 = shl i64 %a, 56
%2 = ashr exact i64 %1, 55
@@ -45,7 +45,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: movsbq %dil, %rax
; CHECK-NEXT: shrq %rax
-; CHECK-NEXT: # kill: def %eax killed %eax killed %rax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-NEXT: retq
%1 = shl i64 %a, 56
%2 = ashr exact i64 %1, 57
diff --git a/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll b/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll
index 612575a..cb2cce1 100644
--- a/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll
+++ b/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll
@@ -239,7 +239,7 @@
; X87_WIN-NEXT: fxch %st(1)
; X87_WIN-NEXT: fucomp %st(2)
; X87_WIN-NEXT: fnstsw %ax
-; X87_WIN-NEXT: # kill: def %ah killed %ah killed %ax
+; X87_WIN-NEXT: # kill: def $ah killed $ah killed $ax
; X87_WIN-NEXT: sahf
; X87_WIN-NEXT: ja LBB0_2
; X87_WIN-NEXT: # %bb.1:
@@ -273,7 +273,7 @@
; X87_LIN-NEXT: fxch %st(1)
; X87_LIN-NEXT: fucomp %st(2)
; X87_LIN-NEXT: fnstsw %ax
-; X87_LIN-NEXT: # kill: def %ah killed %ah killed %ax
+; X87_LIN-NEXT: # kill: def $ah killed $ah killed $ax
; X87_LIN-NEXT: sahf
; X87_LIN-NEXT: ja .LBB0_2
; X87_LIN-NEXT: # %bb.1:
@@ -655,7 +655,7 @@
; X87_WIN-NEXT: fxch %st(1)
; X87_WIN-NEXT: fucomp %st(2)
; X87_WIN-NEXT: fnstsw %ax
-; X87_WIN-NEXT: # kill: def %ah killed %ah killed %ax
+; X87_WIN-NEXT: # kill: def $ah killed $ah killed $ax
; X87_WIN-NEXT: sahf
; X87_WIN-NEXT: ja LBB2_2
; X87_WIN-NEXT: # %bb.1:
@@ -689,7 +689,7 @@
; X87_LIN-NEXT: fxch %st(1)
; X87_LIN-NEXT: fucomp %st(2)
; X87_LIN-NEXT: fnstsw %ax
-; X87_LIN-NEXT: # kill: def %ah killed %ah killed %ax
+; X87_LIN-NEXT: # kill: def $ah killed $ah killed $ax
; X87_LIN-NEXT: sahf
; X87_LIN-NEXT: ja .LBB2_2
; X87_LIN-NEXT: # %bb.1:
@@ -1176,7 +1176,7 @@
; X87_WIN-NEXT: fxch %st(1)
; X87_WIN-NEXT: fucomp %st(2)
; X87_WIN-NEXT: fnstsw %ax
-; X87_WIN-NEXT: # kill: def %ah killed %ah killed %ax
+; X87_WIN-NEXT: # kill: def $ah killed $ah killed $ax
; X87_WIN-NEXT: sahf
; X87_WIN-NEXT: ja LBB4_2
; X87_WIN-NEXT: # %bb.1:
@@ -1210,7 +1210,7 @@
; X87_LIN-NEXT: fxch %st(1)
; X87_LIN-NEXT: fucomp %st(2)
; X87_LIN-NEXT: fnstsw %ax
-; X87_LIN-NEXT: # kill: def %ah killed %ah killed %ax
+; X87_LIN-NEXT: # kill: def $ah killed $ah killed $ax
; X87_LIN-NEXT: sahf
; X87_LIN-NEXT: ja .LBB4_2
; X87_LIN-NEXT: # %bb.1:
diff --git a/llvm/test/CodeGen/X86/scalar_widen_div.ll b/llvm/test/CodeGen/X86/scalar_widen_div.ll
index 13e01b2..1f36db5 100644
--- a/llvm/test/CodeGen/X86/scalar_widen_div.ll
+++ b/llvm/test/CodeGen/X86/scalar_widen_div.ll
@@ -81,15 +81,15 @@
; CHECK-LABEL: test_uchar_div:
; CHECK: # %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
; CHECK-NEXT: divb %cl
; CHECK-NEXT: movl %eax, %edi
; CHECK-NEXT: movzbl %sil, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
; CHECK-NEXT: divb %r8b
; CHECK-NEXT: movl %eax, %esi
; CHECK-NEXT: movzbl %dl, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
; CHECK-NEXT: divb %r9b
; CHECK-NEXT: movl %eax, %ecx
; CHECK-NEXT: movl %edi, %eax
@@ -105,34 +105,34 @@
; CHECK: # %bb.0:
; CHECK-NEXT: pextrw $4, %xmm0, %eax
; CHECK-NEXT: pextrw $4, %xmm1, %ecx
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: cwtd
; CHECK-NEXT: idivw %cx
; CHECK-NEXT: movl %eax, %r8d
; CHECK-NEXT: pextrw $3, %xmm0, %eax
; CHECK-NEXT: pextrw $3, %xmm1, %ecx
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: cwtd
; CHECK-NEXT: idivw %cx
; CHECK-NEXT: movl %eax, %r9d
; CHECK-NEXT: pextrw $2, %xmm0, %eax
; CHECK-NEXT: pextrw $2, %xmm1, %ecx
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: cwtd
; CHECK-NEXT: idivw %cx
; CHECK-NEXT: movl %eax, %edi
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: movd %xmm1, %ecx
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: cwtd
; CHECK-NEXT: idivw %cx
; CHECK-NEXT: movl %eax, %ecx
; CHECK-NEXT: pextrw $1, %xmm0, %eax
; CHECK-NEXT: pextrw $1, %xmm1, %esi
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: cwtd
; CHECK-NEXT: idivw %si
-; CHECK-NEXT: # kill: def %ax killed %ax def %eax
+; CHECK-NEXT: # kill: def $ax killed $ax def $eax
; CHECK-NEXT: movd %ecx, %xmm0
; CHECK-NEXT: pinsrw $1, %eax, %xmm0
; CHECK-NEXT: pinsrw $2, %edi, %xmm0
@@ -294,34 +294,34 @@
; CHECK: # %bb.0:
; CHECK-NEXT: pextrw $4, %xmm0, %eax
; CHECK-NEXT: pextrw $4, %xmm1, %ecx
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: cwtd
; CHECK-NEXT: idivw %cx
; CHECK-NEXT: movl %edx, %r8d
; CHECK-NEXT: pextrw $3, %xmm0, %eax
; CHECK-NEXT: pextrw $3, %xmm1, %ecx
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: cwtd
; CHECK-NEXT: idivw %cx
; CHECK-NEXT: movl %edx, %r9d
; CHECK-NEXT: pextrw $2, %xmm0, %eax
; CHECK-NEXT: pextrw $2, %xmm1, %ecx
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: cwtd
; CHECK-NEXT: idivw %cx
; CHECK-NEXT: movl %edx, %edi
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: movd %xmm1, %ecx
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: cwtd
; CHECK-NEXT: idivw %cx
; CHECK-NEXT: movl %edx, %ecx
; CHECK-NEXT: pextrw $1, %xmm0, %eax
; CHECK-NEXT: pextrw $1, %xmm1, %esi
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: cwtd
; CHECK-NEXT: idivw %si
-; CHECK-NEXT: # kill: def %dx killed %dx def %edx
+; CHECK-NEXT: # kill: def $dx killed $dx def $edx
; CHECK-NEXT: movd %ecx, %xmm0
; CHECK-NEXT: pinsrw $1, %edx, %xmm0
; CHECK-NEXT: pinsrw $2, %edi, %xmm0
diff --git a/llvm/test/CodeGen/X86/scavenger.mir b/llvm/test/CodeGen/X86/scavenger.mir
index 5e964f8..2fd3393 100644
--- a/llvm/test/CodeGen/X86/scavenger.mir
+++ b/llvm/test/CodeGen/X86/scavenger.mir
@@ -5,10 +5,10 @@
tracksRegLiveness: true
body: |
bb.0:
- ; CHECK: [[REG0:%e[a-z]+]] = MOV32ri 42
- ; CHECK: %ebp = COPY killed [[REG0]]
+ ; CHECK: [[REG0:\$e[a-z]+]] = MOV32ri 42
+ ; CHECK: $ebp = COPY killed [[REG0]]
%0 : gr32 = MOV32ri 42
- %ebp = COPY %0
+ $ebp = COPY %0
...
---
# CHECK-LABEL: name: func2
@@ -16,27 +16,27 @@
tracksRegLiveness: true
body: |
bb.0:
- ; CHECK-NOT: %eax = MOV32ri 42
- ; CHECK: [[REG0:%e[a-z]+]] = MOV32ri 42
- ; CHECK: %ebp = COPY killed [[REG0]]
- %eax = MOV32ri 13
+ ; CHECK-NOT: $eax = MOV32ri 42
+ ; CHECK: [[REG0:\$e[a-z]+]] = MOV32ri 42
+ ; CHECK: $ebp = COPY killed [[REG0]]
+ $eax = MOV32ri 13
%0 : gr32 = MOV32ri 42
- %ebp = COPY %0
+ $ebp = COPY %0
- ; CHECK: [[REG1:%e[a-z]+]] = MOV32ri 23
- ; CHECK: [[REG2:%e[a-z]+]] = MOV32ri 7
- ; CHECK: [[REG1]] = ADD32ri8 [[REG1]], 5, implicit-def dead %eflags
+ ; CHECK: [[REG1:\$e[a-z]+]] = MOV32ri 23
+ ; CHECK: [[REG2:\$e[a-z]+]] = MOV32ri 7
+ ; CHECK: [[REG1]] = ADD32ri8 [[REG1]], 5, implicit-def dead $eflags
%1 : gr32 = MOV32ri 23
%2 : gr32 = MOV32ri 7
- %1 = ADD32ri8 %1, 5, implicit-def dead %eflags
+ %1 = ADD32ri8 %1, 5, implicit-def dead $eflags
- NOOP implicit %ebp
+ NOOP implicit $ebp
; CHECK: NOOP implicit killed [[REG2]]
; CHECK: NOOP implicit killed [[REG1]]
NOOP implicit %2
NOOP implicit %1
- RETQ %eax
+ RETQ $eax
...
---
# CHECK-LABEL: name: func3
@@ -44,6 +44,6 @@
tracksRegLiveness: true
body: |
bb.0:
- ; CHECK dead {{%e[a-z]+}} = MOV32ri 42
+    ; CHECK: dead {{\$e[a-z]+}} = MOV32ri 42
dead %0 : gr32 = MOV32ri 42
...
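The scavenger.mir update above is the one spot in this section where the rename is not a pure string substitution: '$' is a regular-expression metacharacter, so FileCheck variable patterns must escape it ('[[REG0:\$e[a-z]+]]'), while literal CHECK text can use '$ebp' as-is. A condensed sketch of func1 above, with the conventions called out in comments:

  body: |
    bb.0:
      ; The '$' sigil is a regex metacharacter, so escape it in patterns:
      ; CHECK: [[REG0:\$e[a-z]+]] = MOV32ri 42
      ; Literal CHECK text needs no escaping:
      ; CHECK: $ebp = COPY killed [[REG0]]
      ; Virtual registers keep '%'; physical registers now take '$':
      %0 : gr32 = MOV32ri 42
      $ebp = COPY %0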
diff --git a/llvm/test/CodeGen/X86/schedule-x86-64-shld.ll b/llvm/test/CodeGen/X86/schedule-x86-64-shld.ll
index fdd9e14..b8bf0fa 100644
--- a/llvm/test/CodeGen/X86/schedule-x86-64-shld.ll
+++ b/llvm/test/CodeGen/X86/schedule-x86-64-shld.ll
@@ -166,7 +166,7 @@
; BTVER2-NEXT: shlq %cl, %rdi # sched: [1:0.50]
; BTVER2-NEXT: movl $64, %ecx # sched: [1:0.50]
; BTVER2-NEXT: subl %edx, %ecx # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %cl killed %cl killed %ecx
+; BTVER2-NEXT: # kill: def $cl killed $cl killed $ecx
; BTVER2-NEXT: shrq %cl, %rsi # sched: [1:0.50]
; BTVER2-NEXT: orq %rdi, %rsi # sched: [1:0.50]
; BTVER2-NEXT: movq %rsi, %rax # sched: [1:0.50]
@@ -178,7 +178,7 @@
; BDVER1-NEXT: shlq %cl, %rdi
; BDVER1-NEXT: movl $64, %ecx
; BDVER1-NEXT: subl %edx, %ecx
-; BDVER1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BDVER1-NEXT: # kill: def $cl killed $cl killed $ecx
; BDVER1-NEXT: shrq %cl, %rsi
; BDVER1-NEXT: orq %rdi, %rsi
; BDVER1-NEXT: movq %rsi, %rax
@@ -240,7 +240,7 @@
; BTVER2-NEXT: shrq %cl, %rdi # sched: [1:0.50]
; BTVER2-NEXT: movl $64, %ecx # sched: [1:0.50]
; BTVER2-NEXT: subl %edx, %ecx # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %cl killed %cl killed %ecx
+; BTVER2-NEXT: # kill: def $cl killed $cl killed $ecx
; BTVER2-NEXT: shlq %cl, %rsi # sched: [1:0.50]
; BTVER2-NEXT: orq %rdi, %rsi # sched: [1:0.50]
; BTVER2-NEXT: movq %rsi, %rax # sched: [1:0.50]
@@ -252,7 +252,7 @@
; BDVER1-NEXT: shrq %cl, %rdi
; BDVER1-NEXT: movl $64, %ecx
; BDVER1-NEXT: subl %edx, %ecx
-; BDVER1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BDVER1-NEXT: # kill: def $cl killed $cl killed $ecx
; BDVER1-NEXT: shlq %cl, %rsi
; BDVER1-NEXT: orq %rdi, %rsi
; BDVER1-NEXT: movq %rsi, %rax
@@ -314,7 +314,7 @@
; BTVER2-NEXT: shlq %cl, %rax # sched: [1:0.50]
; BTVER2-NEXT: movl $64, %ecx # sched: [1:0.50]
; BTVER2-NEXT: subl %esi, %ecx # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %cl killed %cl killed %ecx
+; BTVER2-NEXT: # kill: def $cl killed $cl killed $ecx
; BTVER2-NEXT: shrq %cl, %rdi # sched: [1:0.50]
; BTVER2-NEXT: orq %rax, %rdi # sched: [1:0.50]
; BTVER2-NEXT: movq %rdi, {{.*}}(%rip) # sched: [1:1.00]
@@ -327,7 +327,7 @@
; BDVER1-NEXT: shlq %cl, %rax
; BDVER1-NEXT: movl $64, %ecx
; BDVER1-NEXT: subl %esi, %ecx
-; BDVER1-NEXT: # kill: def %cl killed %cl killed %ecx
+; BDVER1-NEXT: # kill: def $cl killed $cl killed $ecx
; BDVER1-NEXT: shrq %cl, %rdi
; BDVER1-NEXT: orq %rax, %rdi
; BDVER1-NEXT: movq %rdi, {{.*}}(%rip)
diff --git a/llvm/test/CodeGen/X86/schedule-x86_64.ll b/llvm/test/CodeGen/X86/schedule-x86_64.ll
index dda4d7b..a38fbff 100644
--- a/llvm/test/CodeGen/X86/schedule-x86_64.ll
+++ b/llvm/test/CodeGen/X86/schedule-x86_64.ll
@@ -1948,7 +1948,7 @@
; GENERIC-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_bsf16:
@@ -1958,7 +1958,7 @@
; ATOM-NEXT: bsfw (%rsi), %cx # sched: [16:8.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; ATOM-NEXT: # kill: def %ax killed %ax killed %eax
+; ATOM-NEXT: # kill: def $ax killed $ax killed $eax
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_bsf16:
@@ -1968,7 +1968,7 @@
; SLM-NEXT: bsfw (%rsi), %cx # sched: [4:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; SLM-NEXT: # kill: def %ax killed %ax killed %eax
+; SLM-NEXT: # kill: def $ax killed $ax killed $eax
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_bsf16:
@@ -1978,7 +1978,7 @@
; SANDY-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; SANDY-NEXT: # kill: def %ax killed %ax killed %eax
+; SANDY-NEXT: # kill: def $ax killed $ax killed $eax
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bsf16:
@@ -1988,7 +1988,7 @@
; HASWELL-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; HASWELL-NEXT: # kill: def $ax killed $ax killed $eax
; HASWELL-NEXT: retq # sched: [7:1.00]
;
; BROADWELL-LABEL: test_bsf16:
@@ -1998,7 +1998,7 @@
; BROADWELL-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; BROADWELL-NEXT: # kill: def $ax killed $ax killed $eax
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bsf16:
@@ -2008,7 +2008,7 @@
; SKYLAKE-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
+; SKYLAKE-NEXT: # kill: def $ax killed $ax killed $eax
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_bsf16:
@@ -2018,7 +2018,7 @@
; SKX-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_bsf16:
@@ -2028,7 +2028,7 @@
; BTVER2-NEXT: bsfw (%rsi), %cx # sched: [4:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
+; BTVER2-NEXT: # kill: def $ax killed $ax killed $eax
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_bsf16:
@@ -2038,7 +2038,7 @@
; ZNVER1-NEXT: bsfw (%rsi), %cx # sched: [7:0.50]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
+; ZNVER1-NEXT: # kill: def $ax killed $ax killed $eax
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call { i16, i16 } asm sideeffect "bsf $2, $0 \0A\09 bsf $3, $1", "=r,=r,r,*m,~{dirflag},~{fpsr},~{flags}"(i16 %a0, i16* %a1)
%2 = extractvalue { i16, i16 } %1, 0
@@ -2247,7 +2247,7 @@
; GENERIC-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_bsr16:
@@ -2257,7 +2257,7 @@
; ATOM-NEXT: bsrw (%rsi), %cx # sched: [16:8.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; ATOM-NEXT: # kill: def %ax killed %ax killed %eax
+; ATOM-NEXT: # kill: def $ax killed $ax killed $eax
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_bsr16:
@@ -2267,7 +2267,7 @@
; SLM-NEXT: bsrw (%rsi), %cx # sched: [4:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; SLM-NEXT: # kill: def %ax killed %ax killed %eax
+; SLM-NEXT: # kill: def $ax killed $ax killed $eax
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_bsr16:
@@ -2277,7 +2277,7 @@
; SANDY-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; SANDY-NEXT: # kill: def %ax killed %ax killed %eax
+; SANDY-NEXT: # kill: def $ax killed $ax killed $eax
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bsr16:
@@ -2287,7 +2287,7 @@
; HASWELL-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; HASWELL-NEXT: # kill: def $ax killed $ax killed $eax
; HASWELL-NEXT: retq # sched: [7:1.00]
;
; BROADWELL-LABEL: test_bsr16:
@@ -2297,7 +2297,7 @@
; BROADWELL-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; BROADWELL-NEXT: # kill: def $ax killed $ax killed $eax
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bsr16:
@@ -2307,7 +2307,7 @@
; SKYLAKE-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
+; SKYLAKE-NEXT: # kill: def $ax killed $ax killed $eax
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_bsr16:
@@ -2317,7 +2317,7 @@
; SKX-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_bsr16:
@@ -2327,7 +2327,7 @@
; BTVER2-NEXT: bsrw (%rsi), %cx # sched: [4:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
+; BTVER2-NEXT: # kill: def $ax killed $ax killed $eax
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_bsr16:
@@ -2337,7 +2337,7 @@
; ZNVER1-NEXT: bsrw (%rsi), %cx # sched: [7:0.50]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
+; ZNVER1-NEXT: # kill: def $ax killed $ax killed $eax
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call { i16, i16 } asm sideeffect "bsr $2, $0 \0A\09 bsr $3, $1", "=r,=r,r,*m,~{dirflag},~{fpsr},~{flags}"(i16 %a0, i16* %a1)
%2 = extractvalue { i16, i16 } %1, 0
diff --git a/llvm/test/CodeGen/X86/select.ll b/llvm/test/CodeGen/X86/select.ll
index d3a8d9d..e1c0703 100644
--- a/llvm/test/CodeGen/X86/select.ll
+++ b/llvm/test/CodeGen/X86/select.ll
@@ -145,7 +145,7 @@
; MCU-NEXT: fucompp
; MCU-NEXT: fnstsw %ax
; MCU-NEXT: xorl %edx, %edx
-; MCU-NEXT: # kill: def %ah killed %ah killed %ax
+; MCU-NEXT: # kill: def $ah killed $ah killed $ax
; MCU-NEXT: sahf
; MCU-NEXT: seta %dl
; MCU-NEXT: movb (%ecx,%edx,4), %al
@@ -798,14 +798,14 @@
; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: negw %di
; GENERIC-NEXT: sbbl %eax, %eax
-; GENERIC-NEXT: ## kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: ## kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test17:
; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: negw %di
; ATOM-NEXT: sbbl %eax, %eax
-; ATOM-NEXT: ## kill: def %ax killed %ax killed %eax
+; ATOM-NEXT: ## kill: def $ax killed $ax killed $eax
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
@@ -816,7 +816,7 @@
; MCU: # %bb.0: # %entry
; MCU-NEXT: negw %ax
; MCU-NEXT: sbbl %eax, %eax
-; MCU-NEXT: # kill: def %ax killed %ax killed %eax
+; MCU-NEXT: # kill: def $ax killed $ax killed $eax
; MCU-NEXT: retl
entry:
%cmp = icmp ne i16 %x, 0
@@ -1027,7 +1027,7 @@
; MCU-NEXT: cmpl %eax, %ecx
; MCU-NEXT: fucom %st(0)
; MCU-NEXT: fnstsw %ax
-; MCU-NEXT: # kill: def %ah killed %ah killed %ax
+; MCU-NEXT: # kill: def $ah killed $ah killed $ax
; MCU-NEXT: sahf
; MCU-NEXT: jp .LBB24_4
; MCU-NEXT: # %bb.5: # %CF244
@@ -1073,7 +1073,7 @@
; MCU-NEXT: negl %edx
; MCU-NEXT: andl $43, %edx
; MCU-NEXT: xorl %edx, %eax
-; MCU-NEXT: # kill: def %ax killed %ax killed %eax
+; MCU-NEXT: # kill: def $ax killed $ax killed $eax
; MCU-NEXT: retl
entry:
%and = and i8 %cond, 1
diff --git a/llvm/test/CodeGen/X86/select_const.ll b/llvm/test/CodeGen/X86/select_const.ll
index d78f94d..6f79e8c 100644
--- a/llvm/test/CodeGen/X86/select_const.ll
+++ b/llvm/test/CodeGen/X86/select_const.ll
@@ -74,7 +74,7 @@
define i32 @select_0_or_neg1(i1 %cond) {
; CHECK-LABEL: select_0_or_neg1:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: retq
@@ -85,7 +85,7 @@
define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_0_or_neg1_zeroext:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: retq
%sel = select i1 %cond, i32 0, i32 -1
@@ -139,7 +139,7 @@
define i32 @select_Cplus1_C(i1 %cond) {
; CHECK-LABEL: select_Cplus1_C:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: leal 41(%rdi), %eax
; CHECK-NEXT: retq
@@ -150,7 +150,7 @@
define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_Cplus1_C_zeroext:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: leal 41(%rdi), %eax
; CHECK-NEXT: retq
%sel = select i1 %cond, i32 42, i32 41
@@ -287,7 +287,7 @@
; CHECK-NEXT: cmpl $43, %edi
; CHECK-NEXT: setl %al
; CHECK-NEXT: leal -1(,%rax,4), %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%cmp = icmp sgt i32 %x, 42
%sel = select i1 %cmp, i16 -1, i16 3
@@ -344,7 +344,7 @@
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: shll $6, %eax
; CHECK-NEXT: orl $7, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%sel = select i1 %cond, i16 7, i16 71
ret i16 %sel
diff --git a/llvm/test/CodeGen/X86/setcc-lowering.ll b/llvm/test/CodeGen/X86/setcc-lowering.ll
index e0390da..3ba2d8c 100644
--- a/llvm/test/CodeGen/X86/setcc-lowering.ll
+++ b/llvm/test/CodeGen/X86/setcc-lowering.ll
@@ -23,7 +23,7 @@
;
; KNL-32-LABEL: pr25080:
; KNL-32: # %bb.0: # %entry
-; KNL-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-32-NEXT: vpbroadcastd {{.*#+}} ymm1 = [8388607,8388607,8388607,8388607,8388607,8388607,8388607,8388607]
; KNL-32-NEXT: vptestnmd %zmm1, %zmm0, %k0
; KNL-32-NEXT: movb $15, %al
@@ -31,7 +31,7 @@
; KNL-32-NEXT: korw %k1, %k0, %k1
; KNL-32-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-32-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-32-NEXT: retl
entry:
%0 = trunc <8 x i32> %a to <8 x i23>
diff --git a/llvm/test/CodeGen/X86/sext-i1.ll b/llvm/test/CodeGen/X86/sext-i1.ll
index bb8a4bc..8abb54e 100644
--- a/llvm/test/CodeGen/X86/sext-i1.ll
+++ b/llvm/test/CodeGen/X86/sext-i1.ll
@@ -124,7 +124,7 @@
;
; X64-LABEL: select_0_or_1s:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: andl $1, %edi
; X64-NEXT: leal -1(%rdi), %eax
; X64-NEXT: retq
@@ -144,7 +144,7 @@
;
; X64-LABEL: select_0_or_1s_zeroext:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal -1(%rdi), %eax
; X64-NEXT: retq
%not = xor i1 %cond, 1
diff --git a/llvm/test/CodeGen/X86/shift-combine.ll b/llvm/test/CodeGen/X86/shift-combine.ll
index 0f2966f..bdbf337 100644
--- a/llvm/test/CodeGen/X86/shift-combine.ll
+++ b/llvm/test/CodeGen/X86/shift-combine.ll
@@ -14,7 +14,7 @@
;
; X64-LABEL: test_lshr_and:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: shrl $2, %edi
; X64-NEXT: andl $3, %edi
; X64-NEXT: movl array(,%rdi,4), %eax
@@ -102,7 +102,7 @@
;
; X64-LABEL: test_exact4:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
; X64-NEXT: subl %edi, %esi
; X64-NEXT: shrl $3, %esi
; X64-NEXT: leaq (%rdx,%rsi,4), %rax
@@ -124,7 +124,7 @@
;
; X64-LABEL: test_exact5:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
; X64-NEXT: subl %edi, %esi
; X64-NEXT: shrl $3, %esi
; X64-NEXT: leaq (%rdx,%rsi,4), %rax
@@ -145,7 +145,7 @@
;
; X64-LABEL: test_exact6:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
; X64-NEXT: subl %edi, %esi
; X64-NEXT: leaq (%rsi,%rdx), %rax
; X64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/shift-double.ll b/llvm/test/CodeGen/X86/shift-double.ll
index f7ea2e3..9037623 100644
--- a/llvm/test/CodeGen/X86/shift-double.ll
+++ b/llvm/test/CodeGen/X86/shift-double.ll
@@ -278,7 +278,7 @@
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: andl $31, %ecx
-; X86-NEXT: # kill: def %cl killed %cl killed %ecx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-NEXT: shldl %cl, %edx, %eax
; X86-NEXT: retl
;
@@ -304,7 +304,7 @@
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: andl $31, %ecx
-; X86-NEXT: # kill: def %cl killed %cl killed %ecx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-NEXT: shrdl %cl, %edx, %eax
; X86-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/shrink-compare.ll b/llvm/test/CodeGen/X86/shrink-compare.ll
index 32dcf42..9c50a35 100644
--- a/llvm/test/CodeGen/X86/shrink-compare.ll
+++ b/llvm/test/CodeGen/X86/shrink-compare.ll
@@ -72,11 +72,11 @@
; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: je .LBB3_1
; CHECK-NEXT: # %bb.2: # %lor.end
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB3_1: # %lor.rhs
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
entry:
%tobool = icmp ne i32 %b, 0
diff --git a/llvm/test/CodeGen/X86/shrink_wrap_dbg_value.mir b/llvm/test/CodeGen/X86/shrink_wrap_dbg_value.mir
index bdc214c..506064a 100644
--- a/llvm/test/CodeGen/X86/shrink_wrap_dbg_value.mir
+++ b/llvm/test/CodeGen/X86/shrink_wrap_dbg_value.mir
@@ -102,8 +102,8 @@
tracksRegLiveness: true
registers:
liveins:
- - { reg: '%ecx', virtual-reg: '' }
- - { reg: '%edx', virtual-reg: '' }
+ - { reg: '$ecx', virtual-reg: '' }
+ - { reg: '$edx', virtual-reg: '' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -134,49 +134,49 @@
body: |
bb.0.entry:
successors: %bb.4(0x40000000), %bb.1(0x40000000)
- liveins: %ecx, %edx
+ liveins: $ecx, $edx
- DBG_VALUE debug-use %edx, debug-use %noreg, !15, !DIExpression(), debug-location !25
- DBG_VALUE debug-use %ecx, debug-use %noreg, !16, !DIExpression(), debug-location !26
- %eax = COPY %ecx
+ DBG_VALUE debug-use $edx, debug-use $noreg, !15, !DIExpression(), debug-location !25
+ DBG_VALUE debug-use $ecx, debug-use $noreg, !16, !DIExpression(), debug-location !26
+ $eax = COPY $ecx
DBG_VALUE %fixed-stack.0, 0, !16, !DIExpression(), debug-location !26
DBG_VALUE %fixed-stack.1, 0, !15, !DIExpression(), debug-location !25
- CMP32rr %eax, killed %edx, implicit-def %eflags, debug-location !27
- JL_1 %bb.4, implicit killed %eflags, debug-location !29
+ CMP32rr $eax, killed $edx, implicit-def $eflags, debug-location !27
+ JL_1 %bb.4, implicit killed $eflags, debug-location !29
JMP_1 %bb.1, debug-location !29
bb.1.for.cond.preheader:
successors: %bb.2(0x80000000)
- %esi = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (load 4 from %fixed-stack.0)
- DBG_VALUE debug-use %esi, debug-use %noreg, !13, !DIExpression(), debug-location !19
- %edi = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (load 4 from %fixed-stack.1)
- DBG_VALUE debug-use %edi, debug-use %noreg, !14, !DIExpression(), debug-location !20
- %edi = DEC32r killed %edi, implicit-def dead %eflags, debug-location !30
- %ebx = LEA32r %fixed-stack.1, 1, %noreg, 0, %noreg
+ $esi = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (load 4 from %fixed-stack.0)
+ DBG_VALUE debug-use $esi, debug-use $noreg, !13, !DIExpression(), debug-location !19
+ $edi = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (load 4 from %fixed-stack.1)
+ DBG_VALUE debug-use $edi, debug-use $noreg, !14, !DIExpression(), debug-location !20
+ $edi = DEC32r killed $edi, implicit-def dead $eflags, debug-location !30
+ $ebx = LEA32r %fixed-stack.1, 1, $noreg, 0, $noreg
bb.2.for.cond:
successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- liveins: %ebx, %edi, %esi
+ liveins: $ebx, $edi, $esi
- ADJCALLSTACKDOWN32 4, 0, 4, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp, debug-location !33
+ ADJCALLSTACKDOWN32 4, 0, 4, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp, debug-location !33
DBG_VALUE %fixed-stack.1, 0, !14, !DIExpression(), debug-location !20
- PUSH32r %ebx, implicit-def %esp, implicit %esp, debug-location !33
+ PUSH32r $ebx, implicit-def $esp, implicit $esp, debug-location !33
CFI_INSTRUCTION adjust_cfa_offset 4, debug-location !33
- CALLpcrel32 @doSomething, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp, implicit-def %eax, debug-location !33
- ADJCALLSTACKUP32 4, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp, debug-location !33
- %edi = INC32r killed %edi, implicit-def dead %eflags, debug-location !30
- CMP32rr %edi, %esi, implicit-def %eflags, debug-location !30
- JL_1 %bb.2, implicit killed %eflags, debug-location !34
+ CALLpcrel32 @doSomething, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp, implicit-def $eax, debug-location !33
+ ADJCALLSTACKUP32 4, 0, implicit-def dead $esp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $esp, implicit $ssp, debug-location !33
+ $edi = INC32r killed $edi, implicit-def dead $eflags, debug-location !30
+ CMP32rr $edi, $esi, implicit-def $eflags, debug-location !30
+ JL_1 %bb.2, implicit killed $eflags, debug-location !34
bb.3:
successors: %bb.4(0x80000000)
- liveins: %eax
+ liveins: $eax
bb.4.return:
- liveins: %eax
+ liveins: $eax
- RET 8, %eax, debug-location !37
+ RET 8, $eax, debug-location !37
...
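Note that the rename leaves the rest of the '%' namespace untouched: in shrink_wrap_dbg_value.mir above, basic-block references (%bb.2), virtual registers, and stack-slot references (%stack.N, %fixed-stack.N) keep '%', while machine registers ($eax, $eflags, $noreg) move to '$'. A minimal sketch of the mixed sigils, drawn from the hunks above (hypothetical block, for illustration):

  bb.1:
    liveins: $ecx
    ; Stack and block references keep '%'; only machine registers use '$':
    $esi = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (load 4 from %fixed-stack.0)
    CMP32rr $esi, killed $ecx, implicit-def $eflags
    JL_1 %bb.2, implicit killed $eflags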
diff --git a/llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll b/llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll
index 13d5d41..78118a3 100644
--- a/llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll
+++ b/llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll
@@ -824,7 +824,7 @@
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -834,7 +834,7 @@
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -844,7 +844,7 @@
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -854,7 +854,7 @@
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -865,7 +865,7 @@
; AVX512BWVL-NEXT: kmovd %eax, %k1
; AVX512BWVL-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1}
; AVX512BWVL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512BWVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BWVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
;
@@ -873,7 +873,7 @@
; AVX512VBMIVL: # %bb.0:
; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} ymm2 = [32,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,48,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
; AVX512VBMIVL-NEXT: vpermt2b %ymm1, %ymm2, %ymm0
-; AVX512VBMIVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512VBMIVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VBMIVL-NEXT: vzeroupper
; AVX512VBMIVL-NEXT: retq
%strided.vec = shufflevector <32 x i8> %v, <32 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
diff --git a/llvm/test/CodeGen/X86/simple-register-allocation-read-undef.mir b/llvm/test/CodeGen/X86/simple-register-allocation-read-undef.mir
index 9bbf6ff..6c0a799 100644
--- a/llvm/test/CodeGen/X86/simple-register-allocation-read-undef.mir
+++ b/llvm/test/CodeGen/X86/simple-register-allocation-read-undef.mir
@@ -3,7 +3,7 @@
name: f
body: |
bb.0:
- JB_1 %bb.2, undef implicit killed %eflags
+ JB_1 %bb.2, undef implicit killed $eflags
JMP_1 %bb.1
bb.1:
diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll b/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
index 3e4600b..107ca91 100644
--- a/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
@@ -5,7 +5,7 @@
define float @foo(float %f) #0 {
; CHECK: {{name: *foo}}
; CHECK: body:
-; CHECK: %0:fr32 = COPY %xmm0
+; CHECK: %0:fr32 = COPY $xmm0
; CHECK: %1:fr32 = VRSQRTSSr killed %2, %0
; CHECK: %3:fr32 = VMULSSrr %0, %1
; CHECK: %4:fr32 = VMOVSSrm
@@ -20,8 +20,8 @@
; CHECK: %14:fr32 = FsFLD0SS
; CHECK: %15:fr32 = VCMPSSrr %0, killed %14, 0
; CHECK: %17:vr128 = VANDNPSrr killed %16, killed %13
-; CHECK: %xmm0 = COPY %18
-; CHECK: RET 0, %xmm0
+; CHECK: $xmm0 = COPY %18
+; CHECK: RET 0, $xmm0
%call = tail call float @llvm.sqrt.f32(float %f) #1
ret float %call
}
@@ -29,7 +29,7 @@
define float @rfoo(float %f) #0 {
; CHECK: {{name: *rfoo}}
; CHECK: body: |
-; CHECK: %0:fr32 = COPY %xmm0
+; CHECK: %0:fr32 = COPY $xmm0
; CHECK: %1:fr32 = VRSQRTSSr killed %2, %0
; CHECK: %3:fr32 = VMULSSrr %0, %1
; CHECK: %4:fr32 = VMOVSSrm
@@ -41,8 +41,8 @@
; CHECK: %10:fr32 = VFMADD213SSr %8, killed %9, %4
; CHECK: %11:fr32 = VMULSSrr %8, %6
; CHECK: %12:fr32 = VMULSSrr killed %11, killed %10
-; CHECK: %xmm0 = COPY %12
-; CHECK: RET 0, %xmm0
+; CHECK: $xmm0 = COPY %12
+; CHECK: RET 0, $xmm0
%sqrt = tail call float @llvm.sqrt.f32(float %f)
%div = fdiv fast float 1.0, %sqrt
ret float %div
diff --git a/llvm/test/CodeGen/X86/sse2-schedule.ll b/llvm/test/CodeGen/X86/sse2-schedule.ll
index c51d19a..a7c1c75 100644
--- a/llvm/test/CodeGen/X86/sse2-schedule.ll
+++ b/llvm/test/CodeGen/X86/sse2-schedule.ll
@@ -5485,61 +5485,61 @@
; GENERIC-LABEL: test_pextrw:
; GENERIC: # %bb.0:
; GENERIC-NEXT: pextrw $6, %xmm0, %eax # sched: [3:1.00]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pextrw:
; ATOM: # %bb.0:
; ATOM-NEXT: pextrw $6, %xmm0, %eax # sched: [4:2.00]
-; ATOM-NEXT: # kill: def %ax killed %ax killed %eax
+; ATOM-NEXT: # kill: def $ax killed $ax killed $eax
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pextrw:
; SLM: # %bb.0:
; SLM-NEXT: pextrw $6, %xmm0, %eax # sched: [1:1.00]
-; SLM-NEXT: # kill: def %ax killed %ax killed %eax
+; SLM-NEXT: # kill: def $ax killed $ax killed $eax
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pextrw:
; SANDY: # %bb.0:
; SANDY-NEXT: vpextrw $6, %xmm0, %eax # sched: [3:1.00]
-; SANDY-NEXT: # kill: def %ax killed %ax killed %eax
+; SANDY-NEXT: # kill: def $ax killed $ax killed $eax
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pextrw:
; HASWELL: # %bb.0:
; HASWELL-NEXT: vpextrw $6, %xmm0, %eax # sched: [2:1.00]
-; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; HASWELL-NEXT: # kill: def $ax killed $ax killed $eax
; HASWELL-NEXT: retq # sched: [7:1.00]
;
; BROADWELL-LABEL: test_pextrw:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpextrw $6, %xmm0, %eax # sched: [2:1.00]
-; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
+; BROADWELL-NEXT: # kill: def $ax killed $ax killed $eax
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pextrw:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpextrw $6, %xmm0, %eax # sched: [3:1.00]
-; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
+; SKYLAKE-NEXT: # kill: def $ax killed $ax killed $eax
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pextrw:
; SKX: # %bb.0:
; SKX-NEXT: vpextrw $6, %xmm0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pextrw:
; BTVER2: # %bb.0:
; BTVER2-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
+; BTVER2-NEXT: # kill: def $ax killed $ax killed $eax
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pextrw:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
+; ZNVER1-NEXT: # kill: def $ax killed $ax killed $eax
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = extractelement <8 x i16> %a0, i32 6
ret i16 %1
diff --git a/llvm/test/CodeGen/X86/sse42-schedule.ll b/llvm/test/CodeGen/X86/sse42-schedule.ll
index 47d9a62..26c72aa 100644
--- a/llvm/test/CodeGen/X86/sse42-schedule.ll
+++ b/llvm/test/CodeGen/X86/sse42-schedule.ll
@@ -370,7 +370,7 @@
; GENERIC-NEXT: movl $7, %eax # sched: [1:0.33]
; GENERIC-NEXT: movl $7, %edx # sched: [1:0.33]
; GENERIC-NEXT: pcmpestri $7, (%rdi), %xmm0 # sched: [4:2.33]
-; GENERIC-NEXT: # kill: def %ecx killed %ecx def %rcx
+; GENERIC-NEXT: # kill: def $ecx killed $ecx def $rcx
; GENERIC-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
@@ -383,7 +383,7 @@
; SLM-NEXT: movl $7, %edx # sched: [1:0.50]
; SLM-NEXT: movl %ecx, %esi # sched: [1:0.50]
; SLM-NEXT: pcmpestri $7, (%rdi), %xmm0 # sched: [21:21.00]
-; SLM-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SLM-NEXT: # kill: def $ecx killed $ecx def $rcx
; SLM-NEXT: leal (%rcx,%rsi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
@@ -396,7 +396,7 @@
; SANDY-NEXT: movl $7, %eax # sched: [1:0.33]
; SANDY-NEXT: movl $7, %edx # sched: [1:0.33]
; SANDY-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [4:2.33]
-; SANDY-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SANDY-NEXT: # kill: def $ecx killed $ecx def $rcx
; SANDY-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
@@ -409,7 +409,7 @@
; HASWELL-NEXT: movl $7, %eax # sched: [1:0.25]
; HASWELL-NEXT: movl $7, %edx # sched: [1:0.25]
; HASWELL-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [24:4.00]
-; HASWELL-NEXT: # kill: def %ecx killed %ecx def %rcx
+; HASWELL-NEXT: # kill: def $ecx killed $ecx def $rcx
; HASWELL-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [7:1.00]
;
@@ -422,7 +422,7 @@
; BROADWELL-NEXT: movl $7, %eax # sched: [1:0.25]
; BROADWELL-NEXT: movl $7, %edx # sched: [1:0.25]
; BROADWELL-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [23:4.00]
-; BROADWELL-NEXT: # kill: def %ecx killed %ecx def %rcx
+; BROADWELL-NEXT: # kill: def $ecx killed $ecx def $rcx
; BROADWELL-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
@@ -435,7 +435,7 @@
; SKYLAKE-NEXT: movl $7, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: movl $7, %edx # sched: [1:0.25]
; SKYLAKE-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [24:4.00]
-; SKYLAKE-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SKYLAKE-NEXT: # kill: def $ecx killed $ecx def $rcx
; SKYLAKE-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
@@ -448,7 +448,7 @@
; SKX-NEXT: movl $7, %eax # sched: [1:0.25]
; SKX-NEXT: movl $7, %edx # sched: [1:0.25]
; SKX-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [24:4.00]
-; SKX-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SKX-NEXT: # kill: def $ecx killed $ecx def $rcx
; SKX-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
@@ -461,7 +461,7 @@
; BTVER2-NEXT: movl $7, %edx # sched: [1:0.50]
; BTVER2-NEXT: movl %ecx, %esi # sched: [1:0.50]
; BTVER2-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [19:10.00]
-; BTVER2-NEXT: # kill: def %ecx killed %ecx def %rcx
+; BTVER2-NEXT: # kill: def $ecx killed $ecx def $rcx
; BTVER2-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
@@ -474,7 +474,7 @@
; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25]
; ZNVER1-NEXT: movl %ecx, %esi # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [100:?]
-; ZNVER1-NEXT: # kill: def %ecx killed %ecx def %rcx
+; ZNVER1-NEXT: # kill: def $ecx killed $ecx def $rcx
; ZNVER1-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
@@ -588,7 +588,7 @@
; GENERIC-NEXT: pcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; GENERIC-NEXT: movl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: pcmpistri $7, (%rdi), %xmm0 # sched: [17:3.00]
-; GENERIC-NEXT: # kill: def %ecx killed %ecx def %rcx
+; GENERIC-NEXT: # kill: def $ecx killed $ecx def $rcx
; GENERIC-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
@@ -597,7 +597,7 @@
; SLM-NEXT: pcmpistri $7, %xmm1, %xmm0 # sched: [17:17.00]
; SLM-NEXT: movl %ecx, %eax # sched: [1:0.50]
; SLM-NEXT: pcmpistri $7, (%rdi), %xmm0 # sched: [17:17.00]
-; SLM-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SLM-NEXT: # kill: def $ecx killed $ecx def $rcx
; SLM-NEXT: leal (%rcx,%rax), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
@@ -606,7 +606,7 @@
; SANDY-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; SANDY-NEXT: movl %ecx, %eax # sched: [1:0.33]
; SANDY-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [17:3.00]
-; SANDY-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SANDY-NEXT: # kill: def $ecx killed $ecx def $rcx
; SANDY-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
@@ -615,7 +615,7 @@
; HASWELL-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; HASWELL-NEXT: movl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [17:3.00]
-; HASWELL-NEXT: # kill: def %ecx killed %ecx def %rcx
+; HASWELL-NEXT: # kill: def $ecx killed $ecx def $rcx
; HASWELL-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [7:1.00]
;
@@ -624,7 +624,7 @@
; BROADWELL-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; BROADWELL-NEXT: movl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [16:3.00]
-; BROADWELL-NEXT: # kill: def %ecx killed %ecx def %rcx
+; BROADWELL-NEXT: # kill: def $ecx killed $ecx def $rcx
; BROADWELL-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
@@ -633,7 +633,7 @@
; SKYLAKE-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [10:3.00]
; SKYLAKE-NEXT: movl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [16:3.00]
-; SKYLAKE-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SKYLAKE-NEXT: # kill: def $ecx killed $ecx def $rcx
; SKYLAKE-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
@@ -642,7 +642,7 @@
; SKX-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [10:3.00]
; SKX-NEXT: movl %ecx, %eax # sched: [1:0.25]
; SKX-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [16:3.00]
-; SKX-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SKX-NEXT: # kill: def $ecx killed $ecx def $rcx
; SKX-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
@@ -651,7 +651,7 @@
; BTVER2-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [7:2.00]
; BTVER2-NEXT: movl %ecx, %eax # sched: [1:0.50]
; BTVER2-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [12:2.00]
-; BTVER2-NEXT: # kill: def %ecx killed %ecx def %rcx
+; BTVER2-NEXT: # kill: def $ecx killed $ecx def $rcx
; BTVER2-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
@@ -660,7 +660,7 @@
; ZNVER1-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: movl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [100:?]
-; ZNVER1-NEXT: # kill: def %ecx killed %ecx def %rcx
+; ZNVER1-NEXT: # kill: def $ecx killed $ecx def $rcx
; ZNVER1-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
diff --git a/llvm/test/CodeGen/X86/subvector-broadcast.ll b/llvm/test/CodeGen/X86/subvector-broadcast.ll
index f44e9a9..bcb7d14 100644
--- a/llvm/test/CodeGen/X86/subvector-broadcast.ll
+++ b/llvm/test/CodeGen/X86/subvector-broadcast.ll
@@ -1093,13 +1093,13 @@
define <4 x double> @reg_broadcast_2f64_4f64(<2 x double> %a0) nounwind {
; X32-LABEL: reg_broadcast_2f64_4f64:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_2f64_4f64:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x double> %a0, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1109,28 +1109,28 @@
define <8 x double> @reg_broadcast_2f64_8f64(<2 x double> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_2f64_8f64:
; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_2f64_8f64:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_2f64_8f64:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_2f64_8f64:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -1146,7 +1146,7 @@
;
; X32-AVX512-LABEL: reg_broadcast_4f64_8f64:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X32-AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
@@ -1157,7 +1157,7 @@
;
; X64-AVX512-LABEL: reg_broadcast_4f64_8f64:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X64-AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
%1 = shufflevector <4 x double> %a0, <4 x double> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -1167,13 +1167,13 @@
define <4 x i64> @reg_broadcast_2i64_4i64(<2 x i64> %a0) nounwind {
; X32-LABEL: reg_broadcast_2i64_4i64:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_2i64_4i64:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1183,28 +1183,28 @@
define <8 x i64> @reg_broadcast_2i64_8i64(<2 x i64> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_2i64_8i64:
; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_2i64_8i64:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_2i64_8i64:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_2i64_8i64:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -1220,7 +1220,7 @@
;
; X32-AVX512-LABEL: reg_broadcast_4i64_8i64:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X32-AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
@@ -1231,7 +1231,7 @@
;
; X64-AVX512-LABEL: reg_broadcast_4i64_8i64:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X64-AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
%1 = shufflevector <4 x i64> %a0, <4 x i64> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -1241,13 +1241,13 @@
define <8 x float> @reg_broadcast_4f32_8f32(<4 x float> %a0) nounwind {
; X32-LABEL: reg_broadcast_4f32_8f32:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_4f32_8f32:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <4 x float> %a0, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -1257,28 +1257,28 @@
define <16 x float> @reg_broadcast_4f32_16f32(<4 x float> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_4f32_16f32:
; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_4f32_16f32:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_4f32_16f32:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_4f32_16f32:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -1294,7 +1294,7 @@
;
; X32-AVX512-LABEL: reg_broadcast_8f32_16f32:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X32-AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
@@ -1305,7 +1305,7 @@
;
; X64-AVX512-LABEL: reg_broadcast_8f32_16f32:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X64-AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
%1 = shufflevector <8 x float> %a0, <8 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1315,13 +1315,13 @@
define <8 x i32> @reg_broadcast_4i32_8i32(<4 x i32> %a0) nounwind {
; X32-LABEL: reg_broadcast_4i32_8i32:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_4i32_8i32:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -1331,28 +1331,28 @@
define <16 x i32> @reg_broadcast_4i32_16i32(<4 x i32> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_4i32_16i32:
; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_4i32_16i32:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_4i32_16i32:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_4i32_16i32:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -1368,7 +1368,7 @@
;
; X32-AVX512-LABEL: reg_broadcast_8i32_16i32:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X32-AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
@@ -1379,7 +1379,7 @@
;
; X64-AVX512-LABEL: reg_broadcast_8i32_16i32:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X64-AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
%1 = shufflevector <8 x i32> %a0, <8 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1389,13 +1389,13 @@
define <16 x i16> @reg_broadcast_8i16_16i16(<8 x i16> %a0) nounwind {
; X32-LABEL: reg_broadcast_8i16_16i16:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_8i16_16i16:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1405,56 +1405,56 @@
define <32 x i16> @reg_broadcast_8i16_32i16(<8 x i16> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_8i16_32i16:
; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512F-LABEL: reg_broadcast_8i16_32i16:
; X32-AVX512F: # %bb.0:
-; X32-AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: reg_broadcast_8i16_32i16:
; X32-AVX512BW: # %bb.0:
-; X32-AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: reg_broadcast_8i16_32i16:
; X32-AVX512DQ: # %bb.0:
-; X32-AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_8i16_32i16:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512F-LABEL: reg_broadcast_8i16_32i16:
; X64-AVX512F: # %bb.0:
-; X64-AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: reg_broadcast_8i16_32i16:
; X64-AVX512BW: # %bb.0:
-; X64-AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: reg_broadcast_8i16_32i16:
; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512DQ-NEXT: retq
@@ -1475,7 +1475,7 @@
;
; X32-AVX512BW-LABEL: reg_broadcast_16i16_32i16:
; X32-AVX512BW: # %bb.0:
-; X32-AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X32-AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
@@ -1496,7 +1496,7 @@
;
; X64-AVX512BW-LABEL: reg_broadcast_16i16_32i16:
; X64-AVX512BW: # %bb.0:
-; X64-AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X64-AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
@@ -1511,13 +1511,13 @@
define <32 x i8> @reg_broadcast_16i8_32i8(<16 x i8> %a0) nounwind {
; X32-LABEL: reg_broadcast_16i8_32i8:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_16i8_32i8:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -1527,56 +1527,56 @@
define <64 x i8> @reg_broadcast_16i8_64i8(<16 x i8> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_16i8_64i8:
; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512F-LABEL: reg_broadcast_16i8_64i8:
; X32-AVX512F: # %bb.0:
-; X32-AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: reg_broadcast_16i8_64i8:
; X32-AVX512BW: # %bb.0:
-; X32-AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: reg_broadcast_16i8_64i8:
; X32-AVX512DQ: # %bb.0:
-; X32-AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_16i8_64i8:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512F-LABEL: reg_broadcast_16i8_64i8:
; X64-AVX512F: # %bb.0:
-; X64-AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: reg_broadcast_16i8_64i8:
; X64-AVX512BW: # %bb.0:
-; X64-AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: reg_broadcast_16i8_64i8:
; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512DQ-NEXT: retq
@@ -1597,7 +1597,7 @@
;
; X32-AVX512BW-LABEL: reg_broadcast_32i8_64i8:
; X32-AVX512BW: # %bb.0:
-; X32-AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X32-AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
@@ -1618,7 +1618,7 @@
;
; X64-AVX512BW-LABEL: reg_broadcast_32i8_64i8:
; X64-AVX512BW: # %bb.0:
-; X64-AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; X64-AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/swift-return.ll b/llvm/test/CodeGen/X86/swift-return.ll
index f45d61a..164d7c3 100644
--- a/llvm/test/CodeGen/X86/swift-return.ll
+++ b/llvm/test/CodeGen/X86/swift-return.ll
@@ -13,10 +13,10 @@
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movl %edi, {{[0-9]+}}(%rsp)
; CHECK-NEXT: callq gen
-; CHECK-NEXT: # kill: def %ax killed %ax def %eax
+; CHECK-NEXT: # kill: def $ax killed $ax def $eax
; CHECK-NEXT: movsbl %dl, %ecx
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/switch-lower-peel-top-case.ll b/llvm/test/CodeGen/X86/switch-lower-peel-top-case.ll
index 8a169c4..76a78de 100644
--- a/llvm/test/CodeGen/X86/switch-lower-peel-top-case.ll
+++ b/llvm/test/CodeGen/X86/switch-lower-peel-top-case.ll
@@ -11,34 +11,34 @@
], !prof !2
; CHECK: successors: %[[PEELED_CASE_LABEL:.*]](0x5999999a), %[[PEELED_SWITCH_LABEL:.*]](0x26666666)
-; CHECK: %[[VAL:[0-9]+]]:gr32 = COPY %edi
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 18568, implicit-def %eflags
-; CHECK: JE_1 %[[PEELED_CASE_LABEL]], implicit %eflags
+; CHECK: %[[VAL:[0-9]+]]:gr32 = COPY $edi
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 18568, implicit-def $eflags
+; CHECK: JE_1 %[[PEELED_CASE_LABEL]], implicit $eflags
; CHECK: JMP_1 %[[PEELED_SWITCH_LABEL]]
; CHECK: [[PEELED_SWITCH_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[BB1_LABEL:.*]](0x0206d3a0), %[[BB2_LABEL:.*]](0x7df92c60)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 18311, implicit-def %eflags
-; CHECK: JG_1 %[[BB2_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 18311, implicit-def $eflags
+; CHECK: JG_1 %[[BB2_LABEL]], implicit $eflags
; CHECK: JMP_1 %[[BB1_LABEL]]
; CHECK: [[BB1_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE2_LABEL:.*]](0x35e50d5b), %[[BB3_LABEL:.*]](0x4a1af2a5)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], -8826, implicit-def %eflags
-; CHECK: JE_1 %[[CASE2_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], -8826, implicit-def $eflags
+; CHECK: JE_1 %[[CASE2_LABEL]], implicit $eflags
; CHECK: JMP_1 %[[BB3_LABEL]]
; CHECK: [[BB3_LABEL]]
; CHECK: successors: %[[CASE5_LABEL:.*]](0x45d173c8), %[[BB4_LABEL:.*]](0x3a2e8c38)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 129, implicit-def %eflags
-; CHECK: JE_1 %[[CASE5_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 129, implicit-def $eflags
+; CHECK: JE_1 %[[CASE5_LABEL]], implicit $eflags
; CHECK: JMP_1 %[[BB4_LABEL]]
; CHECK: [[BB4_LABEL:.*]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE1_LABEL:.*]](0x66666666), %[[DEFAULT_BB_LABEL:.*]](0x1999999a)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 8, implicit-def %eflags
-; CHECK: JE_1 %[[CASE1_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 8, implicit-def $eflags
+; CHECK: JE_1 %[[CASE1_LABEL]], implicit $eflags
; CHECK: JMP_1 %[[DEFAULT_BB_LABEL]]
; CHECK: [[BB2_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE3_LABEL:.*]](0x7fe44107), %[[DEFAULT_BB_LABEL]](0x001bbef9)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 18312, implicit-def %eflags
-; CHECK: JE_1 %[[CASE3_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 18312, implicit-def $eflags
+; CHECK: JE_1 %[[CASE3_LABEL]], implicit $eflags
; CHECK: JMP_1 %[[DEFAULT_BB_LABEL]]
bb1:
@@ -73,40 +73,40 @@
], !prof !3
; CHECK: successors: %[[PEELED_CASE_LABEL:.*]](0x59999999), %[[PEELED_SWITCH_LABEL:.*]](0x26666667)
-; CHECK: %[[VAL:[0-9]+]]:gr32 = COPY %edi
-; CHECK: %{{[0-9]+}}:gr32 = ADD32ri8 %{{[0-9]+}}, -85, implicit-def dead %eflags
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %{{[0-9]+}}, 2, implicit-def %eflags
-; CHECK: JB_1 %[[PEELED_CASE_LABEL]], implicit %eflags
+; CHECK: %[[VAL:[0-9]+]]:gr32 = COPY $edi
+; CHECK: %{{[0-9]+}}:gr32 = ADD32ri8 %{{[0-9]+}}, -85, implicit-def dead $eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %{{[0-9]+}}, 2, implicit-def $eflags
+; CHECK: JB_1 %[[PEELED_CASE_LABEL]], implicit $eflags
; CHECK: JMP_1 %[[PEELED_SWITCH_LABEL]]
; CHECK: [[PEELED_SWITCH_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[BB1_LABEL:.*]](0x0088888a), %[[BB2_LABEL:.*]](0x7f777776)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 4, implicit-def %eflags
-; CHECK: JG_1 %[[BB2_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 4, implicit-def $eflags
+; CHECK: JG_1 %[[BB2_LABEL]], implicit $eflags
; CHECK: JMP_1 %[[BB1_LABEL]]
; CHECK: [[BB1_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE4_LABEL:.*]](0x7f775a4f), %[[BB3_LABEL:.*]](0x0088a5b1)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 1, implicit-def %eflags
-; CHECK: JE_1 %[[CASE4_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 1, implicit-def $eflags
+; CHECK: JE_1 %[[CASE4_LABEL]], implicit $eflags
; CHECK: JMP_1 %[[BB3_LABEL]]
; CHECK: [[BB3_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE1_LABEL:.*]](0x66666666), %[[DEFAULT_BB_LABEL:.*]](0x1999999a)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], -40, implicit-def %eflags
-; CHECK: JE_1 %[[CASE1_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], -40, implicit-def $eflags
+; CHECK: JE_1 %[[CASE1_LABEL]], implicit $eflags
; CHECK: JMP_1 %[[DEFAULT_BB_LABEL]]
; CHECK: [[BB2_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE5_LABEL:.*]](0x00000000), %[[BB4_LABEL:.*]](0x80000000)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 5, implicit-def %eflags
-; CHECK: JE_1 %[[CASE5_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 5, implicit-def $eflags
+; CHECK: JE_1 %[[CASE5_LABEL]], implicit $eflags
; CHECK: JMP_1 %[[BB4_LABEL]]
; CHECK: [[BB4_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE6_LABEL:.*]](0x00000000), %[[BB5_LABEL:.*]](0x80000000)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 7, implicit-def %eflags
-; CHECK: JE_1 %[[CASE6_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 7, implicit-def $eflags
+; CHECK: JE_1 %[[CASE6_LABEL]], implicit $eflags
; CHECK: JMP_1 %[[BB5_LABEL]]
; CHECK: [[BB5_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE7_LABEL:.*]](0x00000000), %[[DEFAULT_BB_LABEL]](0x80000000)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 49, implicit-def %eflags
-; CHECK: JE_1 %[[CASE7_LABEL]], implicit %eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 49, implicit-def $eflags
+; CHECK: JE_1 %[[CASE7_LABEL]], implicit $eflags
; CHECK: JMP_1 %[[DEFAULT_BB_LABEL]]
diff --git a/llvm/test/CodeGen/X86/tail-call-conditional.mir b/llvm/test/CodeGen/X86/tail-call-conditional.mir
index 300b273..329c4cb 100644
--- a/llvm/test/CodeGen/X86/tail-call-conditional.mir
+++ b/llvm/test/CodeGen/X86/tail-call-conditional.mir
@@ -29,57 +29,57 @@
name: test
tracksRegLiveness: true
liveins:
- - { reg: '%rdi' }
- - { reg: '%rsi' }
+ - { reg: '$rdi' }
+ - { reg: '$rsi' }
body: |
bb.0:
successors: %bb.1, %bb.4
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
- %rax = COPY %rdi
- CMP64ri8 %rax, 99, implicit-def %eflags
- JA_1 %bb.4, implicit %eflags
+ $rax = COPY $rdi
+ CMP64ri8 $rax, 99, implicit-def $eflags
+ JA_1 %bb.4, implicit $eflags
JMP_1 %bb.1
; CHECK: bb.1:
; CHECK-NEXT: successors: %bb.2({{[^)]+}}){{$}}
- ; CHECK-NEXT: liveins: %rax, %rsi
+ ; CHECK-NEXT: liveins: $rax, $rsi
; CHECK-NEXT: {{^ $}}
- ; CHECK-NEXT: %rdi = COPY %rsi
- ; CHECK-NEXT: %rsi = COPY %rax
- ; CHECK-NEXT: CMP64ri8 %rax, 9, implicit-def %eflags
- ; CHECK-NEXT: TCRETURNdi64cc @f1, 0, 3, csr_64, implicit %rsp, implicit %eflags, implicit %ssp, implicit %rsp, implicit %rdi, implicit %rsi, implicit %rax, implicit-def %rax, implicit %sil, implicit-def %sil, implicit %si, implicit-def %si, implicit %esi, implicit-def %esi, implicit %rsi, implicit-def %rsi, implicit %dil, implicit-def %dil, implicit %di, implicit-def %di, implicit %edi, implicit-def %edi, implicit %rdi, implicit-def %rdi, implicit %ah, implicit-def %ah, implicit %al, implicit-def %al, implicit %ax, implicit-def %ax, implicit %eax, implicit-def %eax
+ ; CHECK-NEXT: $rdi = COPY $rsi
+ ; CHECK-NEXT: $rsi = COPY $rax
+ ; CHECK-NEXT: CMP64ri8 $rax, 9, implicit-def $eflags
+ ; CHECK-NEXT: TCRETURNdi64cc @f1, 0, 3, csr_64, implicit $rsp, implicit $eflags, implicit $ssp, implicit $rsp, implicit $rdi, implicit $rsi, implicit $rax, implicit-def $rax, implicit $sil, implicit-def $sil, implicit $si, implicit-def $si, implicit $esi, implicit-def $esi, implicit $rsi, implicit-def $rsi, implicit $dil, implicit-def $dil, implicit $di, implicit-def $di, implicit $edi, implicit-def $edi, implicit $rdi, implicit-def $rdi, implicit $ah, implicit-def $ah, implicit $al, implicit-def $al, implicit $ax, implicit-def $ax, implicit $eax, implicit-def $eax
bb.1:
successors: %bb.2, %bb.3
- liveins: %rax, %rsi
+ liveins: $rax, $rsi
- CMP64ri8 %rax, 9, implicit-def %eflags
- JA_1 %bb.3, implicit %eflags
+ CMP64ri8 $rax, 9, implicit-def $eflags
+ JA_1 %bb.3, implicit $eflags
JMP_1 %bb.2
bb.2:
- liveins: %rax, %rsi
+ liveins: $rax, $rsi
- %rdi = COPY %rsi
- %rsi = COPY %rax
+ $rdi = COPY $rsi
+ $rsi = COPY $rax
- TCRETURNdi64 @f1, 0, csr_64, implicit %rsp, implicit %rdi, implicit %rsi
+ TCRETURNdi64 @f1, 0, csr_64, implicit $rsp, implicit $rdi, implicit $rsi
; CHECK: bb.2:
- ; CHECK-NEXT: liveins: %rax, %rdi, %rsi
+ ; CHECK-NEXT: liveins: $rax, $rdi, $rsi
; CHECK-NEXT: {{^ $}}
- ; CHECK-NEXT: TCRETURNdi64 @f2, 0, csr_64, implicit %rsp, implicit %rdi, implicit %rsi
+ ; CHECK-NEXT: TCRETURNdi64 @f2, 0, csr_64, implicit $rsp, implicit $rdi, implicit $rsi
bb.3:
- liveins: %rax, %rsi
+ liveins: $rax, $rsi
- %rdi = COPY %rsi
- %rsi = COPY %rax
- TCRETURNdi64 @f2, 0, csr_64, implicit %rsp, implicit %rdi, implicit %rsi
+ $rdi = COPY $rsi
+ $rsi = COPY $rax
+ TCRETURNdi64 @f2, 0, csr_64, implicit $rsp, implicit $rdi, implicit $rsi
bb.4:
- dead %eax = MOV32ri64 123, implicit-def %rax
- RET 0, %rax
+ dead $eax = MOV32ri64 123, implicit-def $rax
+ RET 0, $rax
...
diff --git a/llvm/test/CodeGen/X86/tail-dup-debugloc.ll b/llvm/test/CodeGen/X86/tail-dup-debugloc.ll
index df1a8ee..e8cddd9 100644
--- a/llvm/test/CodeGen/X86/tail-dup-debugloc.ll
+++ b/llvm/test/CodeGen/X86/tail-dup-debugloc.ll
@@ -4,7 +4,7 @@
; 'while.cond1.preheader.lr.ph' survives after tailduplication pass.
;
; CHECK: [[DLOC:![0-9]+]] = !DILocation(line: 9, column: 5, scope: !{{[0-9]+}})
-; CHECK: [[VREG:%[^ ]+]]:gr64 = COPY %rdi
+; CHECK: [[VREG:%[^ ]+]]:gr64 = COPY $rdi
; CHECK: TEST64rr [[VREG]], [[VREG]]
; CHECK-NEXT: JE_1 {{.+}}, debug-location [[DLOC]]
; CHECK-NEXT: JMP_1 {{.+}}, debug-location [[DLOC]]
diff --git a/llvm/test/CodeGen/X86/tail-merge-after-mbp.mir b/llvm/test/CodeGen/X86/tail-merge-after-mbp.mir
index 042ac72..29b6ead 100644
--- a/llvm/test/CodeGen/X86/tail-merge-after-mbp.mir
+++ b/llvm/test/CodeGen/X86/tail-merge-after-mbp.mir
@@ -5,26 +5,26 @@
# check loop bb.9 is not merged with bb.12
# CHECK: bb.2:
# CHECK-NEXT: successors: %bb.3(0x30000000), %bb.4(0x50000000)
-# CHECK: %rax = MOV64rm %r14, 1, %noreg, 0, %noreg
-# CHECK-NEXT: TEST64rr %rax, %rax
+# CHECK: $rax = MOV64rm $r14, 1, $noreg, 0, $noreg
+# CHECK-NEXT: TEST64rr $rax, $rax
# CHECK-NEXT: JE_1 %bb.3
# CHECK: bb.4:
# CHECK-NEXT: successors: %bb.5(0x30000000), %bb.10(0x50000000)
-# CHECK: CMP64mi8 killed %rax, 1, %noreg, 8, %noreg, 0
+# CHECK: CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0
# CHECK-NEXT: JNE_1 %bb.10
# CHECK: bb.5:
# CHECK-NEXT: successors: %bb.6(0x30000000), %bb.7(0x50000000)
-# CHECK: %rax = MOV64rm %r14, 1, %noreg, 0, %noreg
-# CHECK-NEXT: TEST64rr %rax, %rax
+# CHECK: $rax = MOV64rm $r14, 1, $noreg, 0, $noreg
+# CHECK-NEXT: TEST64rr $rax, $rax
# CHECK-NEXT: JE_1 %bb.6
# CHECK: bb.7
# CHECK-NEXT: successors: %bb.8(0x71555555), %bb.10(0x0eaaaaab)
-# CHECK: CMP64mi8 killed %rax, 1, %noreg, 8, %noreg, 0
+# CHECK: CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0
# CHECK-NEXT: JNE_1 %bb.10
# CHECK: bb.8:
# CHECK-NEXT: successors: %bb.9(0x04000000), %bb.7(0x7c000000)
-# CHECK: %rax = MOV64rm %r14, 1, %noreg, 0, %noreg
-# CHECK-NEXT: TEST64rr %rax, %rax
+# CHECK: $rax = MOV64rm $r14, 1, $noreg, 0, $noreg
+# CHECK-NEXT: TEST64rr $rax, $rax
# CHECK-NEXT: JNE_1 %bb.7
name: foo
@@ -32,74 +32,74 @@
bb.0:
successors: %bb.1(0x40000000), %bb.7(0x40000000)
- TEST8ri %dl, 1, implicit-def %eflags, implicit killed %edx
- JE_1 %bb.7, implicit %eflags
+ TEST8ri $dl, 1, implicit-def $eflags, implicit killed $edx
+ JE_1 %bb.7, implicit $eflags
bb.1:
successors: %bb.16(0x80000000)
- %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags
+ $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
JMP_1 %bb.16
bb.7:
successors: %bb.8(0x30000000), %bb.9(0x50000000)
- %rax = MOV64rm %r14, 1, %noreg, 0, %noreg :: (load 8)
- TEST64rr %rax, %rax, implicit-def %eflags
- JNE_1 %bb.9, implicit killed %eflags
+ $rax = MOV64rm $r14, 1, $noreg, 0, $noreg :: (load 8)
+ TEST64rr $rax, $rax, implicit-def $eflags
+ JNE_1 %bb.9, implicit killed $eflags
bb.8:
successors: %bb.16(0x80000000)
- %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags
+ $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
JMP_1 %bb.16
bb.9:
successors: %bb.10(0x30000000), %bb.15(0x50000000)
- CMP64mi8 killed %rax, 1, %noreg, 8, %noreg, 0, implicit-def %eflags :: (load 8)
- JNE_1 %bb.15, implicit %eflags
+ CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load 8)
+ JNE_1 %bb.15, implicit $eflags
bb.10:
successors: %bb.11(0x30000000), %bb.12(0x50000000)
- %rax = MOV64rm %r14, 1, %noreg, 0, %noreg :: (load 8)
- TEST64rr %rax, %rax, implicit-def %eflags
- JNE_1 %bb.12, implicit %eflags
+ $rax = MOV64rm $r14, 1, $noreg, 0, $noreg :: (load 8)
+ TEST64rr $rax, $rax, implicit-def $eflags
+ JNE_1 %bb.12, implicit $eflags
bb.11:
successors: %bb.16(0x80000000)
- %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags
+ $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
JMP_1 %bb.16
bb.12:
successors: %bb.13(0x71555555), %bb.15(0x0eaaaaab)
- CMP64mi8 killed %rax, 1, %noreg, 8, %noreg, 0, implicit-def %eflags :: (load 8), (load 8)
- JNE_1 %bb.15, implicit %eflags
+ CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load 8), (load 8)
+ JNE_1 %bb.15, implicit $eflags
bb.13:
successors: %bb.14(0x04000000), %bb.12(0x7c000000)
- %rax = MOV64rm %r14, 1, %noreg, 0, %noreg :: (load 8)
- TEST64rr %rax, %rax, implicit-def %eflags
- JNE_1 %bb.12, implicit %eflags
+ $rax = MOV64rm $r14, 1, $noreg, 0, $noreg :: (load 8)
+ TEST64rr $rax, $rax, implicit-def $eflags
+ JNE_1 %bb.12, implicit $eflags
bb.14:
successors: %bb.16(0x80000000)
- %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags
+ $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
JMP_1 %bb.16
bb.15:
successors: %bb.16(0x80000000)
- %ebp = XOR32rr undef %ebp, undef %ebp, implicit-def dead %eflags
- dead %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags, implicit-def %al
+ $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
+ dead $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags, implicit-def $al
bb.16:
- RETQ %eax
+ RETQ $eax
...
diff --git a/llvm/test/CodeGen/X86/tail-merge-debugloc.ll b/llvm/test/CodeGen/X86/tail-merge-debugloc.ll
index 85ba0ab..3090e58 100644
--- a/llvm/test/CodeGen/X86/tail-merge-debugloc.ll
+++ b/llvm/test/CodeGen/X86/tail-merge-debugloc.ll
@@ -6,7 +6,7 @@
; location info.
;
; CHECK: [[DLOC:![0-9]+]] = !DILocation(line: 2, column: 2, scope: !{{[0-9]+}})
-; CHECK: TEST64rr{{.*}}%rsi, renamable %rsi, implicit-def %eflags
+; CHECK: TEST64rr{{.*}}$rsi, renamable $rsi, implicit-def $eflags
; CHECK-NEXT: JNE_1{{.*}}, debug-location [[DLOC]]
target triple = "x86_64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll
index f45da31..a84ca3f 100644
--- a/llvm/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll
@@ -28,7 +28,7 @@
;
; X64-LABEL: test__blcfill_u32:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal 1(%rdi), %eax
; X64-NEXT: andl %edi, %eax
; X64-NEXT: retq
@@ -48,7 +48,7 @@
;
; X64-LABEL: test__blci_u32:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal 1(%rdi), %eax
; X64-NEXT: xorl $-1, %eax
; X64-NEXT: orl %edi, %eax
@@ -93,7 +93,7 @@
;
; X64-LABEL: test__blcmsk_u32:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal 1(%rdi), %eax
; X64-NEXT: xorl %edi, %eax
; X64-NEXT: retq
@@ -112,7 +112,7 @@
;
; X64-LABEL: test__blcs_u32:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal 1(%rdi), %eax
; X64-NEXT: orl %edi, %eax
; X64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/tbm_patterns.ll b/llvm/test/CodeGen/X86/tbm_patterns.ll
index 5fdfd57..c429de9 100644
--- a/llvm/test/CodeGen/X86/tbm_patterns.ll
+++ b/llvm/test/CodeGen/X86/tbm_patterns.ll
@@ -151,7 +151,7 @@
define i32 @test_x86_tbm_blcfill_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcfill_u32_z2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: testl %edi, %eax
; CHECK-NEXT: cmovnel %edx, %esi
@@ -230,7 +230,7 @@
define i32 @test_x86_tbm_blci_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u32_z2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: notl %eax
; CHECK-NEXT: orl %edi, %eax
@@ -419,7 +419,7 @@
define i32 @test_x86_tbm_blcmsk_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcmsk_u32_z2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: xorl %edi, %eax
; CHECK-NEXT: cmovnel %edx, %esi
@@ -496,7 +496,7 @@
define i32 @test_x86_tbm_blcs_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcs_u32_z2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: cmovnel %edx, %esi
@@ -573,7 +573,7 @@
define i32 @test_x86_tbm_blsfill_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blsfill_u32_z2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: cmovnel %edx, %esi
diff --git a/llvm/test/CodeGen/X86/trunc-subvector.ll b/llvm/test/CodeGen/X86/trunc-subvector.ll
index 332bf58..b253828 100644
--- a/llvm/test/CodeGen/X86/trunc-subvector.ll
+++ b/llvm/test/CodeGen/X86/trunc-subvector.ll
@@ -11,7 +11,7 @@
;
; AVX-LABEL: test1:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x = sext <8 x i32> %v to <8 x i64>
@@ -50,7 +50,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -77,14 +77,14 @@
; AVX2-LABEL: test4:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test4:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%x = sext <8 x i32> %v to <8 x i64>
@@ -113,7 +113,7 @@
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -138,7 +138,7 @@
;
; AVX-LABEL: test6:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%x = zext <8 x i32> %v to <8 x i64>
@@ -176,7 +176,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -202,14 +202,14 @@
; AVX2-LABEL: test9:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test9:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%x = zext <8 x i32> %v to <8 x i64>
@@ -234,7 +234,7 @@
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/umul-with-overflow.ll b/llvm/test/CodeGen/X86/umul-with-overflow.ll
index 5a57f9f..5d001a9 100644
--- a/llvm/test/CodeGen/X86/umul-with-overflow.ll
+++ b/llvm/test/CodeGen/X86/umul-with-overflow.ll
@@ -35,7 +35,7 @@
;
; X64-LABEL: test2:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: addl %esi, %edi
; X64-NEXT: leal (%rdi,%rdi), %eax
; X64-NEXT: retq
@@ -57,8 +57,8 @@
;
; X64-LABEL: test3:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: leal (%rdi,%rsi), %eax
; X64-NEXT: movl $4, %ecx
; X64-NEXT: mull %ecx
diff --git a/llvm/test/CodeGen/X86/unreachable-mbb-undef-phi.mir b/llvm/test/CodeGen/X86/unreachable-mbb-undef-phi.mir
index 52867e5..0839475 100644
--- a/llvm/test/CodeGen/X86/unreachable-mbb-undef-phi.mir
+++ b/llvm/test/CodeGen/X86/unreachable-mbb-undef-phi.mir
@@ -13,7 +13,7 @@
bb.1:
%1 = PHI %0, %bb.0, %2, %bb.2
- %2 = ADD32ri8 killed %1, 1, implicit-def %eflags
+ %2 = ADD32ri8 killed %1, 1, implicit-def $eflags
JMP_1 %bb.3
bb.2:
diff --git a/llvm/test/CodeGen/X86/update-terminator-debugloc.ll b/llvm/test/CodeGen/X86/update-terminator-debugloc.ll
index 17b98c3..9443e7a 100644
--- a/llvm/test/CodeGen/X86/update-terminator-debugloc.ll
+++ b/llvm/test/CodeGen/X86/update-terminator-debugloc.ll
@@ -21,8 +21,8 @@
; these debug locations are propagated correctly to lowered instructions.
;
; CHECK: [[DLOC:![0-9]+]] = !DILocation(line: 6
-; CHECK-DAG: [[VREG1:%[^ ]+]]:gr64 = COPY %rsi
-; CHECK-DAG: [[VREG2:%[^ ]+]]:gr64 = COPY %rdi
+; CHECK-DAG: [[VREG1:%[^ ]+]]:gr64 = COPY $rsi
+; CHECK-DAG: [[VREG2:%[^ ]+]]:gr64 = COPY $rdi
; CHECK: SUB64rr [[VREG2]], [[VREG1]]
; CHECK-NEXT: JNE_1 {{.*}}, debug-location [[DLOC]]{{$}}
; CHECK: [[VREG3:%[^ ]+]]:gr64 = PHI [[VREG2]]
diff --git a/llvm/test/CodeGen/X86/update-terminator.mir b/llvm/test/CodeGen/X86/update-terminator.mir
index 4515521..3ca52d9 100644
--- a/llvm/test/CodeGen/X86/update-terminator.mir
+++ b/llvm/test/CodeGen/X86/update-terminator.mir
@@ -48,29 +48,29 @@
bb.0 (%ir-block.0):
successors: %bb.1(50), %bb.3(50)
- JNE_1 %bb.1, implicit %eflags
+ JNE_1 %bb.1, implicit $eflags
JMP_1 %bb.3
bb.1:
successors: %bb.2(100)
- CALL64pcrel32 @dummy1, csr_64, implicit %rsp, implicit-def %rsp
- CALL64pcrel32 @dummy1, csr_64, implicit %rsp, implicit-def %rsp
- CALL64pcrel32 @dummy1, csr_64, implicit %rsp, implicit-def %rsp
- JNE_1 %bb.2, implicit %eflags
+ CALL64pcrel32 @dummy1, csr_64, implicit $rsp, implicit-def $rsp
+ CALL64pcrel32 @dummy1, csr_64, implicit $rsp, implicit-def $rsp
+ CALL64pcrel32 @dummy1, csr_64, implicit $rsp, implicit-def $rsp
+ JNE_1 %bb.2, implicit $eflags
bb.2:
successors: %bb.4(100)
- CALL64pcrel32 @dummy2, csr_64, implicit %rsp, implicit-def %rsp
- CALL64pcrel32 @dummy2, csr_64, implicit %rsp, implicit-def %rsp
- CALL64pcrel32 @dummy2, csr_64, implicit %rsp, implicit-def %rsp
+ CALL64pcrel32 @dummy2, csr_64, implicit $rsp, implicit-def $rsp
+ CALL64pcrel32 @dummy2, csr_64, implicit $rsp, implicit-def $rsp
+ CALL64pcrel32 @dummy2, csr_64, implicit $rsp, implicit-def $rsp
JMP_1 %bb.4
bb.3:
successors: %bb.2(100)
- CALL64pcrel32 @dummy3, csr_64, implicit %rsp, implicit-def %rsp
- CALL64pcrel32 @dummy3, csr_64, implicit %rsp, implicit-def %rsp
- CALL64pcrel32 @dummy3, csr_64, implicit %rsp, implicit-def %rsp
+ CALL64pcrel32 @dummy3, csr_64, implicit $rsp, implicit-def $rsp
+ CALL64pcrel32 @dummy3, csr_64, implicit $rsp, implicit-def $rsp
+ CALL64pcrel32 @dummy3, csr_64, implicit $rsp, implicit-def $rsp
JMP_1 %bb.2
bb.4:
diff --git a/llvm/test/CodeGen/X86/urem-i8-constant.ll b/llvm/test/CodeGen/X86/urem-i8-constant.ll
index 03d12dd..d4fd92c 100644
--- a/llvm/test/CodeGen/X86/urem-i8-constant.ll
+++ b/llvm/test/CodeGen/X86/urem-i8-constant.ll
@@ -10,7 +10,7 @@
; CHECK-NEXT: imull $111, %ecx, %eax
; CHECK-NEXT: shrl $12, %eax
; CHECK-NEXT: movb $37, %dl
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: mulb %dl
; CHECK-NEXT: subb %al, %cl
; CHECK-NEXT: movl %ecx, %eax
diff --git a/llvm/test/CodeGen/X86/urem-power-of-two.ll b/llvm/test/CodeGen/X86/urem-power-of-two.ll
index 6760239..5517c35 100644
--- a/llvm/test/CodeGen/X86/urem-power-of-two.ll
+++ b/llvm/test/CodeGen/X86/urem-power-of-two.ll
@@ -56,7 +56,7 @@
; X86-NEXT: shrl %cl, %eax
; X86-NEXT: decl %eax
; X86-NEXT: andw {{[0-9]+}}(%esp), %ax
-; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: shift_right_pow_2:
@@ -66,7 +66,7 @@
; X64-NEXT: shrl %cl, %eax
; X64-NEXT: decl %eax
; X64-NEXT: andl %edi, %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%shr = lshr i16 -32768, %y
%urem = urem i16 %x, %shr
@@ -81,20 +81,20 @@
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: andb $4, %cl
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: # kill: def %eax killed %eax def %ax
+; X86-NEXT: # kill: def $eax killed $eax def $ax
; X86-NEXT: divb %cl
; X86-NEXT: movzbl %ah, %eax
-; X86-NEXT: # kill: def %al killed %al killed %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
;
; X64-LABEL: and_pow_2:
; X64: # %bb.0:
; X64-NEXT: andb $4, %sil
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: def %eax killed %eax def %ax
+; X64-NEXT: # kill: def $eax killed $eax def $ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%and = and i8 %y, 4
%urem = urem i8 %x, %and
diff --git a/llvm/test/CodeGen/X86/var-permute-256.ll b/llvm/test/CodeGen/X86/var-permute-256.ll
index 1df6f7e..138f07c 100644
--- a/llvm/test/CodeGen/X86/var-permute-256.ll
+++ b/llvm/test/CodeGen/X86/var-permute-256.ll
@@ -1340,13 +1340,13 @@
;
; AVX512VL-LABEL: var_shuffle_v4i64_from_v2i64:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VL-NEXT: vpermpd %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_v4i64_from_v2i64:
; AVX512VLBW: # %bb.0:
-; AVX512VLBW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512VLBW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VLBW-NEXT: vpermpd %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%index0 = extractelement <4 x i64> %indices, i32 0
@@ -1398,7 +1398,7 @@
;
; INT256-LABEL: var_shuffle_v8i32_from_v4i32:
; INT256: # %bb.0: # %entry
-; INT256-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; INT256-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; INT256-NEXT: vpermps %ymm0, %ymm1, %ymm0
; INT256-NEXT: retq
entry:
@@ -1660,7 +1660,7 @@
;
; AVX512VLBW-LABEL: var_shuffle_v16i16_from_v8i16:
; AVX512VLBW: # %bb.0:
-; AVX512VLBW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512VLBW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VLBW-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%index0 = extractelement <16 x i16> %indices, i32 0
@@ -2201,7 +2201,7 @@
;
; VBMI-LABEL: var_shuffle_v32i8_from_v16i8:
; VBMI: # %bb.0:
-; VBMI-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; VBMI-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; VBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
; VBMI-NEXT: retq
%index0 = extractelement <32 x i8> %indices, i32 0
@@ -2363,13 +2363,13 @@
;
; AVX512VL-LABEL: var_shuffle_v4f64_from_v2f64:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VL-NEXT: vpermpd %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_v4f64_from_v2f64:
; AVX512VLBW: # %bb.0:
-; AVX512VLBW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512VLBW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VLBW-NEXT: vpermpd %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%index0 = extractelement <4 x i64> %indices, i32 0
@@ -2421,7 +2421,7 @@
;
; INT256-LABEL: var_shuffle_v8f32_from_v4f32:
; INT256: # %bb.0: # %entry
-; INT256-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; INT256-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; INT256-NEXT: vpermps %ymm0, %ymm1, %ymm0
; INT256-NEXT: retq
entry:
diff --git a/llvm/test/CodeGen/X86/vec_cmp_sint-128.ll b/llvm/test/CodeGen/X86/vec_cmp_sint-128.ll
index 6ecd7c7..daf389f 100644
--- a/llvm/test/CodeGen/X86/vec_cmp_sint-128.ll
+++ b/llvm/test/CodeGen/X86/vec_cmp_sint-128.ll
@@ -156,7 +156,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ne <2 x i64> %a, %b
@@ -195,7 +195,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ne <4 x i32> %a, %b
@@ -234,7 +234,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ne <8 x i16> %a, %b
@@ -273,7 +273,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ne <16 x i8> %a, %b
@@ -350,7 +350,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sge <2 x i64> %a, %b
@@ -389,7 +389,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sge <4 x i32> %a, %b
@@ -428,7 +428,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sge <8 x i16> %a, %b
@@ -467,7 +467,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sge <16 x i8> %a, %b
@@ -658,7 +658,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sle <2 x i64> %a, %b
@@ -697,7 +697,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sle <4 x i32> %a, %b
@@ -736,7 +736,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sle <8 x i16> %a, %b
@@ -775,7 +775,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sle <16 x i8> %a, %b
diff --git a/llvm/test/CodeGen/X86/vec_cmp_uint-128.ll b/llvm/test/CodeGen/X86/vec_cmp_uint-128.ll
index 9b2c4b9..3c606ba 100644
--- a/llvm/test/CodeGen/X86/vec_cmp_uint-128.ll
+++ b/llvm/test/CodeGen/X86/vec_cmp_uint-128.ll
@@ -156,7 +156,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ne <2 x i64> %a, %b
@@ -195,7 +195,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ne <4 x i32> %a, %b
@@ -234,7 +234,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ne <8 x i16> %a, %b
@@ -273,7 +273,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ne <16 x i8> %a, %b
@@ -357,8 +357,8 @@
;
; AVX512-LABEL: ge_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm1
; AVX512-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -693,8 +693,8 @@
;
; AVX512-LABEL: le_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm1
; AVX512-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/vec_fp_to_int.ll b/llvm/test/CodeGen/X86/vec_fp_to_int.ll
index 51f228b..5c835a1 100644
--- a/llvm/test/CodeGen/X86/vec_fp_to_int.ll
+++ b/llvm/test/CodeGen/X86/vec_fp_to_int.ll
@@ -60,9 +60,9 @@
;
; AVX512DQ-LABEL: fptosi_2f64_to_2i64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -115,7 +115,7 @@
;
; AVX-LABEL: fptosi_4f64_to_2i32:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
@@ -217,9 +217,9 @@
;
; AVX512DQ-LABEL: fptosi_4f64_to_4i64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_4f64_to_4i64:
@@ -321,9 +321,9 @@
;
; AVX512DQ-LABEL: fptoui_2f64_to_2i64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvttpd2uqq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -388,7 +388,7 @@
;
; AVX512F-LABEL: fptoui_2f64_to_4i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512F-NEXT: vzeroupper
@@ -401,7 +401,7 @@
;
; AVX512DQ-LABEL: fptoui_2f64_to_4i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512DQ-NEXT: vzeroupper
@@ -467,9 +467,9 @@
;
; AVX512F-LABEL: fptoui_2f64_to_2i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -480,9 +480,9 @@
;
; AVX512DQ-LABEL: fptoui_2f64_to_2i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -542,30 +542,30 @@
;
; AVX512F-LABEL: fptoui_4f64_to_2i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f64_to_2i32:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VL-NEXT: vcvttpd2udq %ymm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f64_to_2i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f64_to_2i32:
; AVX512VLDQ: # %bb.0:
-; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512VLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VLDQ-NEXT: vcvttpd2udq %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
@@ -736,9 +736,9 @@
;
; AVX512DQ-LABEL: fptoui_4f64_to_4i64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvttpd2uqq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f64_to_4i64:
@@ -812,9 +812,9 @@
;
; AVX512F-LABEL: fptoui_4f64_to_4i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -826,9 +826,9 @@
;
; AVX512DQ-LABEL: fptoui_4f64_to_4i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -919,9 +919,9 @@
;
; AVX512DQ-LABEL: fptosi_2f32_to_2i64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -978,16 +978,16 @@
;
; AVX512DQ-LABEL: fptosi_4f32_to_2i64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_4f32_to_2i64:
; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2qq %xmm0, %ymm0
-; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512VLDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%cvt = fptosi <4 x float> %a to <4 x i64>
@@ -1106,7 +1106,7 @@
; AVX512DQ-LABEL: fptosi_4f32_to_4i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_4f32_to_4i64:
@@ -1214,13 +1214,13 @@
; AVX512DQ-LABEL: fptosi_8f32_to_4i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_8f32_to_4i64:
; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512VLDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512VLDQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512VLDQ-NEXT: retq
%cvt = fptosi <8 x float> %a to <8 x i64>
%shuf = shufflevector <8 x i64> %cvt, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1281,7 +1281,7 @@
;
; AVX512F-LABEL: fptoui_2f32_to_2i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512F-NEXT: vzeroupper
@@ -1295,7 +1295,7 @@
;
; AVX512DQ-LABEL: fptoui_2f32_to_2i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512DQ-NEXT: vzeroupper
@@ -1349,9 +1349,9 @@
;
; AVX512F-LABEL: fptoui_4f32_to_4i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1362,9 +1362,9 @@
;
; AVX512DQ-LABEL: fptoui_4f32_to_4i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -1446,9 +1446,9 @@
;
; AVX512DQ-LABEL: fptoui_2f32_to_2i64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -1531,16 +1531,16 @@
;
; AVX512DQ-LABEL: fptoui_4f32_to_2i64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f32_to_2i64:
; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2uqq %xmm0, %ymm0
-; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512VLDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <4 x float> %a to <4 x i64>
@@ -1644,9 +1644,9 @@
;
; AVX512F-LABEL: fptoui_8f32_to_8i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_8f32_to_8i32:
@@ -1656,9 +1656,9 @@
;
; AVX512DQ-LABEL: fptoui_8f32_to_8i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_8f32_to_8i32:
@@ -1835,7 +1835,7 @@
; AVX512DQ-LABEL: fptoui_4f32_to_4i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f32_to_4i64:
@@ -2013,13 +2013,13 @@
; AVX512DQ-LABEL: fptoui_8f32_to_4i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_8f32_to_4i64:
; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2uqq %ymm0, %zmm0
-; AVX512VLDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512VLDQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <8 x float> %a to <8 x i64>
%shuf = shufflevector <8 x i64> %cvt, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
diff --git a/llvm/test/CodeGen/X86/vec_ins_extract-1.ll b/llvm/test/CodeGen/X86/vec_ins_extract-1.ll
index 949ef56..cf70d5d 100644
--- a/llvm/test/CodeGen/X86/vec_ins_extract-1.ll
+++ b/llvm/test/CodeGen/X86/vec_ins_extract-1.ll
@@ -22,7 +22,7 @@
;
; X64-LABEL: t0:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
; X64-NEXT: movl $76, -24(%rsp,%rdi,4)
@@ -51,7 +51,7 @@
;
; X64-LABEL: t1:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movl $76, %eax
; X64-NEXT: pinsrd $0, %eax, %xmm0
; X64-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -79,7 +79,7 @@
;
; X64-LABEL: t2:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
; X64-NEXT: pinsrd $0, -24(%rsp,%rdi,4), %xmm0
@@ -106,7 +106,7 @@
;
; X64-LABEL: t3:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
; X64-NEXT: movss %xmm0, -24(%rsp,%rdi,4)
diff --git a/llvm/test/CodeGen/X86/vec_insert-4.ll b/llvm/test/CodeGen/X86/vec_insert-4.ll
index 0602165..2c34b3b 100644
--- a/llvm/test/CodeGen/X86/vec_insert-4.ll
+++ b/llvm/test/CodeGen/X86/vec_insert-4.ll
@@ -26,7 +26,7 @@
; X64-NEXT: movq %rsp, %rbp
; X64-NEXT: andq $-32, %rsp
; X64-NEXT: subq $64, %rsp
-; X64-NEXT: ## kill: def %edi killed %edi def %rdi
+; X64-NEXT: ## kill: def $edi killed $edi def $rdi
; X64-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
; X64-NEXT: movaps %xmm0, (%rsp)
; X64-NEXT: andl $7, %edi
diff --git a/llvm/test/CodeGen/X86/vec_insert-5.ll b/llvm/test/CodeGen/X86/vec_insert-5.ll
index d4a0c82..b0637bd 100644
--- a/llvm/test/CodeGen/X86/vec_insert-5.ll
+++ b/llvm/test/CodeGen/X86/vec_insert-5.ll
@@ -17,7 +17,7 @@
;
; X64-LABEL: t1:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: shll $12, %edi
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
diff --git a/llvm/test/CodeGen/X86/vec_insert-8.ll b/llvm/test/CodeGen/X86/vec_insert-8.ll
index a421ff2..a89b967 100644
--- a/llvm/test/CodeGen/X86/vec_insert-8.ll
+++ b/llvm/test/CodeGen/X86/vec_insert-8.ll
@@ -23,7 +23,7 @@
;
; X64-LABEL: var_insert:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %esi
; X64-NEXT: movl %edi, -24(%rsp,%rsi,4)
@@ -51,7 +51,7 @@
;
; X64-LABEL: var_extract:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
; X64-NEXT: movl -24(%rsp,%rdi,4), %eax
diff --git a/llvm/test/CodeGen/X86/vec_insert-mmx.ll b/llvm/test/CodeGen/X86/vec_insert-mmx.ll
index 39e21e9..c0c244d 100644
--- a/llvm/test/CodeGen/X86/vec_insert-mmx.ll
+++ b/llvm/test/CodeGen/X86/vec_insert-mmx.ll
@@ -16,7 +16,7 @@
;
; X64-LABEL: t0:
; X64: ## %bb.0:
-; X64-NEXT: ## kill: def %edi killed %edi def %rdi
+; X64-NEXT: ## kill: def $edi killed $edi def $rdi
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
diff --git a/llvm/test/CodeGen/X86/vec_int_to_fp.ll b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
index 30ba727..355fa68 100644
--- a/llvm/test/CodeGen/X86/vec_int_to_fp.ll
+++ b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
@@ -58,9 +58,9 @@
;
; AVX512DQ-LABEL: sitofp_2i64_to_2f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -96,7 +96,7 @@
; AVX-LABEL: sitofp_4i32_to_2f64:
; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%cvt = sitofp <4 x i32> %a to <4 x double>
@@ -134,7 +134,7 @@
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -142,7 +142,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -150,7 +150,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <8 x i16> %a to <8 x double>
@@ -190,7 +190,7 @@
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -198,7 +198,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -206,7 +206,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <16 x i8> %a to <16 x double>
@@ -301,9 +301,9 @@
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_4i64_to_4f64:
@@ -377,7 +377,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%cvt = sitofp <8 x i16> %a to <8 x double>
%shuf = shufflevector <8 x double> %cvt, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -432,7 +432,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%cvt = sitofp <16 x i8> %a to <16 x double>
%shuf = shufflevector <16 x double> %cvt, <16 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -492,9 +492,9 @@
;
; AVX512DQ-LABEL: uitofp_2i64_to_2f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -531,9 +531,9 @@
;
; AVX512F-LABEL: uitofp_2i32_to_2f64:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -544,9 +544,9 @@
;
; AVX512DQ-LABEL: uitofp_2i32_to_2f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -580,7 +580,7 @@
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: vmulpd {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vaddpd %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -594,37 +594,37 @@
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: vaddpd %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_4i32_to_2f64:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i32_to_2f64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2pd %xmm0, %ymm0
-; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i32_to_2f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i32_to_2f64:
; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2pd %xmm0, %ymm0
-; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512VLDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <4 x i32> %a to <4 x double>
@@ -662,7 +662,7 @@
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -670,7 +670,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -678,7 +678,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <8 x i16> %a to <8 x double>
@@ -718,7 +718,7 @@
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -726,7 +726,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -734,7 +734,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <16 x i8> %a to <16 x double>
@@ -823,9 +823,9 @@
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i64_to_4f64:
@@ -883,9 +883,9 @@
;
; AVX512F-LABEL: uitofp_4i32_to_4f64:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i32_to_4f64:
@@ -895,9 +895,9 @@
;
; AVX512DQ-LABEL: uitofp_4i32_to_4f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i32_to_4f64:
@@ -956,7 +956,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%cvt = uitofp <8 x i16> %a to <8 x double>
%shuf = shufflevector <8 x double> %cvt, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1013,7 +1013,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%cvt = uitofp <16 x i8> %a to <16 x double>
%shuf = shufflevector <16 x double> %cvt, <16 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1072,9 +1072,9 @@
;
; AVX512DQ-LABEL: sitofp_2i64_to_4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -1131,7 +1131,7 @@
;
; AVX512DQ-LABEL: sitofp_2i64_to_4f32_zero:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512DQ-NEXT: vzeroupper
@@ -1197,15 +1197,15 @@
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f32_undef:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_4i64_to_4f32_undef:
; AVX512VLDQ: # %bb.0:
-; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512VLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VLDQ-NEXT: vcvtqq2ps %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
@@ -1261,7 +1261,7 @@
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1269,7 +1269,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1277,7 +1277,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <8 x i16> %a to <8 x float>
@@ -1320,7 +1320,7 @@
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1328,7 +1328,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1336,7 +1336,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <16 x i8> %a to <16 x float>
@@ -1436,9 +1436,9 @@
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -1576,7 +1576,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%cvt = sitofp <16 x i8> %a to <16 x float>
%shuf = shufflevector <16 x float> %cvt, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1691,9 +1691,9 @@
;
; AVX512DQ-LABEL: uitofp_2i64_to_4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -1800,7 +1800,7 @@
;
; AVX512DQ-LABEL: uitofp_2i64_to_2f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512DQ-NEXT: vzeroupper
@@ -1927,15 +1927,15 @@
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f32_undef:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i64_to_4f32_undef:
; AVX512VLDQ: # %bb.0:
-; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512VLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VLDQ-NEXT: vcvtuqq2ps %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
@@ -1979,9 +1979,9 @@
;
; AVX512F-LABEL: uitofp_4i32_to_4f32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1992,9 +1992,9 @@
;
; AVX512DQ-LABEL: uitofp_4i32_to_4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2039,7 +2039,7 @@
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -2047,7 +2047,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2055,7 +2055,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <8 x i16> %a to <8 x float>
@@ -2098,7 +2098,7 @@
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -2106,7 +2106,7 @@
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2114,7 +2114,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <16 x i8> %a to <16 x float>
@@ -2361,9 +2361,9 @@
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2425,9 +2425,9 @@
;
; AVX512F-LABEL: uitofp_8i32_to_8f32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_8i32_to_8f32:
@@ -2437,9 +2437,9 @@
;
; AVX512DQ-LABEL: uitofp_8i32_to_8f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_8i32_to_8f32:
@@ -2556,7 +2556,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%cvt = uitofp <16 x i8> %a to <16 x float>
%shuf = shufflevector <16 x float> %cvt, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -2614,7 +2614,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2778,7 +2778,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_load_4i64_to_4f64:
@@ -2910,7 +2910,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2952,7 +2952,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -2965,7 +2965,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -3108,7 +3108,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i64_to_4f64:
@@ -3172,7 +3172,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps (%rdi), %xmm0
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_4i32_to_4f64:
@@ -3184,7 +3184,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i32_to_4f64:
@@ -3342,7 +3342,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -3933,7 +3933,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -3986,7 +3986,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps (%rdi), %xmm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -3999,7 +3999,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -4575,7 +4575,7 @@
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps (%rdi), %ymm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_8i32_to_8f32:
@@ -4587,7 +4587,7 @@
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_8i32_to_8f32:
diff --git a/llvm/test/CodeGen/X86/vec_minmax_sint.ll b/llvm/test/CodeGen/X86/vec_minmax_sint.ll
index df1699a..6ae1abe 100644
--- a/llvm/test/CodeGen/X86/vec_minmax_sint.ll
+++ b/llvm/test/CodeGen/X86/vec_minmax_sint.ll
@@ -72,10 +72,10 @@
;
; AVX512-LABEL: max_gt_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sgt <2 x i64> %a, %b
@@ -183,10 +183,10 @@
;
; AVX512-LABEL: max_gt_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%1 = icmp sgt <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -479,10 +479,10 @@
;
; AVX512-LABEL: max_ge_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sge <2 x i64> %a, %b
@@ -608,10 +608,10 @@
;
; AVX512-LABEL: max_ge_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%1 = icmp sge <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -905,10 +905,10 @@
;
; AVX512-LABEL: min_lt_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp slt <2 x i64> %a, %b
@@ -1017,10 +1017,10 @@
;
; AVX512-LABEL: min_lt_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%1 = icmp slt <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -1306,10 +1306,10 @@
;
; AVX512-LABEL: min_le_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sle <2 x i64> %a, %b
@@ -1434,10 +1434,10 @@
;
; AVX512-LABEL: min_le_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%1 = icmp sle <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
diff --git a/llvm/test/CodeGen/X86/vec_minmax_uint.ll b/llvm/test/CodeGen/X86/vec_minmax_uint.ll
index 294d10c..21c4407 100644
--- a/llvm/test/CodeGen/X86/vec_minmax_uint.ll
+++ b/llvm/test/CodeGen/X86/vec_minmax_uint.ll
@@ -82,10 +82,10 @@
;
; AVX512-LABEL: max_gt_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ugt <2 x i64> %a, %b
@@ -208,10 +208,10 @@
;
; AVX512-LABEL: max_gt_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%1 = icmp ugt <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -526,10 +526,10 @@
;
; AVX512-LABEL: max_ge_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp uge <2 x i64> %a, %b
@@ -669,10 +669,10 @@
;
; AVX512-LABEL: max_ge_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%1 = icmp uge <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -980,10 +980,10 @@
;
; AVX512-LABEL: min_lt_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ult <2 x i64> %a, %b
@@ -1106,10 +1106,10 @@
;
; AVX512-LABEL: min_lt_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%1 = icmp ult <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -1423,10 +1423,10 @@
;
; AVX512-LABEL: min_le_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ule <2 x i64> %a, %b
@@ -1566,10 +1566,10 @@
;
; AVX512-LABEL: min_le_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%1 = icmp ule <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
diff --git a/llvm/test/CodeGen/X86/vec_ss_load_fold.ll b/llvm/test/CodeGen/X86/vec_ss_load_fold.ll
index 87634a9..8377c01 100644
--- a/llvm/test/CodeGen/X86/vec_ss_load_fold.ll
+++ b/llvm/test/CodeGen/X86/vec_ss_load_fold.ll
@@ -17,7 +17,7 @@
; X32-NEXT: minss LCPI0_2, %xmm0
; X32-NEXT: maxss %xmm1, %xmm0
; X32-NEXT: cvttss2si %xmm0, %eax
-; X32-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32-NEXT: ## kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test1:
@@ -29,7 +29,7 @@
; X64-NEXT: minss {{.*}}(%rip), %xmm0
; X64-NEXT: maxss %xmm1, %xmm0
; X64-NEXT: cvttss2si %xmm0, %eax
-; X64-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
; X32_AVX1-LABEL: test1:
@@ -42,7 +42,7 @@
; X32_AVX1-NEXT: vminss LCPI0_2, %xmm0, %xmm0
; X32_AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X32_AVX1-NEXT: vcvttss2si %xmm0, %eax
-; X32_AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32_AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X32_AVX1-NEXT: retl
;
; X64_AVX1-LABEL: test1:
@@ -54,7 +54,7 @@
; X64_AVX1-NEXT: vminss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X64_AVX1-NEXT: vcvttss2si %xmm0, %eax
-; X64_AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64_AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
; X64_AVX1-NEXT: retq
;
; X32_AVX512-LABEL: test1:
@@ -67,7 +67,7 @@
; X32_AVX512-NEXT: vminss LCPI0_2, %xmm0, %xmm0
; X32_AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X32_AVX512-NEXT: vcvttss2si %xmm0, %eax
-; X32_AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32_AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
; X32_AVX512-NEXT: retl
;
; X64_AVX512-LABEL: test1:
@@ -79,7 +79,7 @@
; X64_AVX512-NEXT: vminss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X64_AVX512-NEXT: vcvttss2si %xmm0, %eax
-; X64_AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64_AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
; X64_AVX512-NEXT: retq
%tmp = insertelement <4 x float> undef, float %f, i32 0 ; <<4 x float>> [#uses=1]
%tmp10 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
@@ -104,7 +104,7 @@
; X32-NEXT: xorps %xmm1, %xmm1
; X32-NEXT: maxss %xmm1, %xmm0
; X32-NEXT: cvttss2si %xmm0, %eax
-; X32-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32-NEXT: ## kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test2:
@@ -115,7 +115,7 @@
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: maxss %xmm1, %xmm0
; X64-NEXT: cvttss2si %xmm0, %eax
-; X64-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64-NEXT: ## kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
; X32_AVX-LABEL: test2:
@@ -127,7 +127,7 @@
; X32_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X32_AVX-NEXT: vcvttss2si %xmm0, %eax
-; X32_AVX-NEXT: ## kill: def %ax killed %ax killed %eax
+; X32_AVX-NEXT: ## kill: def $ax killed $ax killed $eax
; X32_AVX-NEXT: retl
;
; X64_AVX-LABEL: test2:
@@ -138,7 +138,7 @@
; X64_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X64_AVX-NEXT: vcvttss2si %xmm0, %eax
-; X64_AVX-NEXT: ## kill: def %ax killed %ax killed %eax
+; X64_AVX-NEXT: ## kill: def $ax killed $ax killed $eax
; X64_AVX-NEXT: retq
%tmp28 = fsub float %f, 1.000000e+00 ; <float> [#uses=1]
%tmp37 = fmul float %tmp28, 5.000000e-01 ; <float> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/vector-bitreverse.ll b/llvm/test/CodeGen/X86/vector-bitreverse.ll
index 1e8a693..c0a41d0 100644
--- a/llvm/test/CodeGen/X86/vector-bitreverse.ll
+++ b/llvm/test/CodeGen/X86/vector-bitreverse.ll
@@ -53,7 +53,7 @@
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vpextrb $0, %xmm0, %eax
-; XOP-NEXT: # kill: def %al killed %al killed %eax
+; XOP-NEXT: # kill: def $al killed $al killed $eax
; XOP-NEXT: retq
%b = call i8 @llvm.bitreverse.i8(i8 %a)
ret i8 %b
@@ -62,7 +62,7 @@
define i16 @test_bitreverse_i16(i16 %a) nounwind {
; SSE-LABEL: test_bitreverse_i16:
; SSE: # %bb.0:
-; SSE-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE-NEXT: # kill: def $edi killed $edi def $rdi
; SSE-NEXT: rolw $8, %di
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: andl $3855, %eax # imm = 0xF0F
@@ -80,12 +80,12 @@
; SSE-NEXT: andl $43690, %eax # imm = 0xAAAA
; SSE-NEXT: shrl %eax
; SSE-NEXT: leal (%rax,%rcx,2), %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i16:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: rolw $8, %di
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: andl $3855, %eax # imm = 0xF0F
@@ -103,7 +103,7 @@
; AVX-NEXT: andl $43690, %eax # imm = 0xAAAA
; AVX-NEXT: shrl %eax
; AVX-NEXT: leal (%rax,%rcx,2), %eax
-; AVX-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i16:
@@ -111,7 +111,7 @@
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vmovd %xmm0, %eax
-; XOP-NEXT: # kill: def %ax killed %ax killed %eax
+; XOP-NEXT: # kill: def $ax killed $ax killed $eax
; XOP-NEXT: retq
%b = call i16 @llvm.bitreverse.i16(i16 %a)
ret i16 %b
@@ -120,7 +120,7 @@
define i32 @test_bitreverse_i32(i32 %a) nounwind {
; SSE-LABEL: test_bitreverse_i32:
; SSE: # %bb.0:
-; SSE-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE-NEXT: # kill: def $edi killed $edi def $rdi
; SSE-NEXT: bswapl %edi
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
@@ -142,7 +142,7 @@
;
; AVX-LABEL: test_bitreverse_i32:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: bswapl %edi
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
diff --git a/llvm/test/CodeGen/X86/vector-compare-all_of.ll b/llvm/test/CodeGen/X86/vector-compare-all_of.ll
index cbaeb14..0a756df 100644
--- a/llvm/test/CodeGen/X86/vector-compare-all_of.ll
+++ b/llvm/test/CodeGen/X86/vector-compare-all_of.ll
@@ -608,7 +608,7 @@
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i16_sext:
@@ -619,7 +619,7 @@
; AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX-NEXT: movl $-1, %eax
; AVX-NEXT: cmovnel %ecx, %eax
-; AVX-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16_sext:
@@ -632,7 +632,7 @@
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: retq
%c = icmp sgt <8 x i16> %a0, %a1
%s = sext <8 x i1> %c to <8 x i16>
@@ -657,7 +657,7 @@
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_sext:
@@ -675,7 +675,7 @@
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -686,7 +686,7 @@
; AVX2-NEXT: xorl %eax, %eax
; AVX2-NEXT: cmpl $-1, %ecx
; AVX2-NEXT: cmovel %ecx, %eax
-; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -702,7 +702,7 @@
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <16 x i16> %a0, %a1
@@ -730,7 +730,7 @@
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_legal_sext:
@@ -745,7 +745,7 @@
; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX1-NEXT: movl $-1, %eax
; AVX1-NEXT: cmovnel %ecx, %eax
-; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -759,7 +759,7 @@
; AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX2-NEXT: movl $-1, %eax
; AVX2-NEXT: cmovnel %ecx, %eax
-; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -777,7 +777,7 @@
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: movsbl %al, %eax
-; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <16 x i16> %a0, %a1
@@ -804,7 +804,7 @@
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: def %al killed %al killed %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16i8_sext:
@@ -815,7 +815,7 @@
; AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX-NEXT: movl $-1, %eax
; AVX-NEXT: cmovnel %ecx, %eax
-; AVX-NEXT: # kill: def %al killed %al killed %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16i8_sext:
@@ -830,7 +830,7 @@
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def %al killed %al killed %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: retq
%c = icmp sgt <16 x i8> %a0, %a1
%s = sext <16 x i1> %c to <16 x i8>
@@ -857,7 +857,7 @@
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: def %al killed %al killed %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v32i8_sext:
@@ -877,7 +877,7 @@
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -888,7 +888,7 @@
; AVX2-NEXT: xorl %eax, %eax
; AVX2-NEXT: cmpl $-1, %ecx
; AVX2-NEXT: cmovel %ecx, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -906,7 +906,7 @@
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def %al killed %al killed %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <32 x i8> %a0, %a1
diff --git a/llvm/test/CodeGen/X86/vector-compare-any_of.ll b/llvm/test/CodeGen/X86/vector-compare-any_of.ll
index a94ab5e..dcf01b6 100644
--- a/llvm/test/CodeGen/X86/vector-compare-any_of.ll
+++ b/llvm/test/CodeGen/X86/vector-compare-any_of.ll
@@ -562,7 +562,7 @@
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i16_sext:
@@ -571,7 +571,7 @@
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: negl %eax
; AVX-NEXT: sbbl %eax, %eax
-; AVX-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16_sext:
@@ -584,7 +584,7 @@
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: retq
%c = icmp sgt <8 x i16> %a0, %a1
%s = sext <8 x i1> %c to <8 x i16>
@@ -607,7 +607,7 @@
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_sext:
@@ -625,7 +625,7 @@
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -635,7 +635,7 @@
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: negl %eax
; AVX2-NEXT: sbbl %eax, %eax
-; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -651,7 +651,7 @@
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <16 x i16> %a0, %a1
@@ -677,7 +677,7 @@
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_legal_sext:
@@ -690,7 +690,7 @@
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: negl %eax
; AVX1-NEXT: sbbl %eax, %eax
-; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -702,7 +702,7 @@
; AVX2-NEXT: vpmovmskb %xmm0, %eax
; AVX2-NEXT: negl %eax
; AVX2-NEXT: sbbl %eax, %eax
-; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -720,7 +720,7 @@
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: movsbl %al, %eax
-; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <16 x i16> %a0, %a1
@@ -745,7 +745,7 @@
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: def %al killed %al killed %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16i8_sext:
@@ -754,7 +754,7 @@
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: negl %eax
; AVX-NEXT: sbbl %eax, %eax
-; AVX-NEXT: # kill: def %al killed %al killed %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16i8_sext:
@@ -769,7 +769,7 @@
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def %al killed %al killed %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: retq
%c = icmp sgt <16 x i8> %a0, %a1
%s = sext <16 x i1> %c to <16 x i8>
@@ -794,7 +794,7 @@
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: def %al killed %al killed %eax
+; SSE-NEXT: # kill: def $al killed $al killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v32i8_sext:
@@ -814,7 +814,7 @@
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def %al killed %al killed %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -824,7 +824,7 @@
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: negl %eax
; AVX2-NEXT: sbbl %eax, %eax
-; AVX2-NEXT: # kill: def %al killed %al killed %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -842,7 +842,7 @@
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def %al killed %al killed %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <32 x i8> %a0, %a1
diff --git a/llvm/test/CodeGen/X86/vector-compare-results.ll b/llvm/test/CodeGen/X86/vector-compare-results.ll
index e922893..2a0fbf4 100644
--- a/llvm/test/CodeGen/X86/vector-compare-results.ll
+++ b/llvm/test/CodeGen/X86/vector-compare-results.ll
@@ -145,7 +145,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = fcmp ogt <4 x double> %a0, %a1
@@ -181,7 +181,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = fcmp ogt <8 x float> %a0, %a1
@@ -244,7 +244,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sgt <4 x i64> %a0, %a1
@@ -281,7 +281,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sgt <8 x i32> %a0, %a1
@@ -334,7 +334,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i16> %a0, %a1
@@ -661,7 +661,7 @@
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -670,7 +670,7 @@
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -678,7 +678,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <8 x double> %a0, %a1
@@ -741,7 +741,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <16 x float> %a0, %a1
@@ -840,7 +840,7 @@
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -849,7 +849,7 @@
; AVX512DQ-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -857,7 +857,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <8 x i64> %a0, %a1
@@ -923,7 +923,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i32> %a0, %a1
@@ -1238,7 +1238,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i16> %a0, %a1
ret <32 x i1> %1
@@ -2261,9 +2261,9 @@
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm4
; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm2
; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vmovdqa %xmm4, %xmm1
-; AVX512F-NEXT: # kill: def %xmm2 killed %xmm2 killed %ymm2
+; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 killed $ymm2
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -2273,9 +2273,9 @@
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm4
; AVX512DQ-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm2
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vmovdqa %xmm4, %xmm1
-; AVX512DQ-NEXT: # kill: def %xmm2 killed %xmm2 killed %ymm2
+; AVX512DQ-NEXT: # kill: def $xmm2 killed $xmm2 killed $ymm2
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2384,7 +2384,7 @@
; AVX512BW-NEXT: vcmpltpd %zmm1, %zmm3, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <16 x double> %a0, %a1
@@ -2739,7 +2739,7 @@
; AVX512BW-NEXT: vcmpltps %zmm1, %zmm3, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
%1 = fcmp ogt <32 x float> %a0, %a1
ret <32 x i1> %1
@@ -2935,7 +2935,7 @@
; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i64> %a0, %a1
@@ -3282,7 +3282,7 @@
; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i32> %a0, %a1
ret <32 x i1> %1
@@ -6934,7 +6934,7 @@
; AVX512BW-NEXT: kunpckbw %k1, %k2, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
%1 = fcmp ogt <32 x double> %a0, %a1
ret <32 x i1> %1
@@ -7577,7 +7577,7 @@
; AVX512BW-NEXT: kunpckbw %k1, %k2, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i64> %a0, %a1
ret <32 x i1> %1
diff --git a/llvm/test/CodeGen/X86/vector-extend-inreg.ll b/llvm/test/CodeGen/X86/vector-extend-inreg.ll
index 5f6eaa3..289afeb 100644
--- a/llvm/test/CodeGen/X86/vector-extend-inreg.ll
+++ b/llvm/test/CodeGen/X86/vector-extend-inreg.ll
@@ -47,7 +47,7 @@
; X64-SSE-NEXT: movq %rsp, %rbp
; X64-SSE-NEXT: andq $-128, %rsp
; X64-SSE-NEXT: subq $256, %rsp # imm = 0x100
-; X64-SSE-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-SSE-NEXT: # kill: def $edi killed $edi def $rdi
; X64-SSE-NEXT: psrldq {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
; X64-SSE-NEXT: xorps %xmm0, %xmm0
; X64-SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
@@ -99,7 +99,7 @@
; X64-AVX-NEXT: movq %rsp, %rbp
; X64-AVX-NEXT: andq $-128, %rsp
; X64-AVX-NEXT: subq $256, %rsp # imm = 0x100
-; X64-AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-AVX-NEXT: # kill: def $edi killed $edi def $rdi
; X64-AVX-NEXT: vpermpd {{.*#+}} ymm0 = ymm3[3,1,2,3]
; X64-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
diff --git a/llvm/test/CodeGen/X86/vector-half-conversions.ll b/llvm/test/CodeGen/X86/vector-half-conversions.ll
index a6d7afb..51a4dae 100644
--- a/llvm/test/CodeGen/X86/vector-half-conversions.ll
+++ b/llvm/test/CodeGen/X86/vector-half-conversions.ll
@@ -29,7 +29,7 @@
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: movswl %ax, %esi
-; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: shrq $48, %rdx
@@ -56,7 +56,7 @@
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: movswl %ax, %esi
-; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: shrq $48, %rdx
@@ -83,7 +83,7 @@
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movq %rax, %rdx
; AVX512F-NEXT: movswl %ax, %esi
-; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512F-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: shrq $32, %rcx
; AVX512F-NEXT: shrq $48, %rdx
@@ -110,7 +110,7 @@
; AVX512VL-NEXT: movq %rax, %rcx
; AVX512VL-NEXT: movq %rax, %rdx
; AVX512VL-NEXT: movswl %ax, %esi
-; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512VL-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: shrq $32, %rcx
; AVX512VL-NEXT: shrq $48, %rdx
@@ -141,7 +141,7 @@
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: movswl %ax, %esi
-; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: shrq $48, %rdx
@@ -167,7 +167,7 @@
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: movswl %ax, %esi
-; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: shrq $48, %rdx
@@ -193,7 +193,7 @@
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movq %rax, %rdx
; AVX512F-NEXT: movswl %ax, %esi
-; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512F-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: shrq $32, %rcx
; AVX512F-NEXT: shrq $48, %rdx
@@ -221,7 +221,7 @@
; AVX512VL-NEXT: movq %rax, %rcx
; AVX512VL-NEXT: movq %rax, %rdx
; AVX512VL-NEXT: movswl %ax, %esi
-; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512VL-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: shrq $32, %rcx
; AVX512VL-NEXT: shrq $48, %rdx
@@ -253,7 +253,7 @@
; ALL-NEXT: movq %rdx, %r8
; ALL-NEXT: movq %rdx, %r10
; ALL-NEXT: movswl %dx, %r9d
-; ALL-NEXT: # kill: def %edx killed %edx killed %rdx
+; ALL-NEXT: # kill: def $edx killed $edx killed $rdx
; ALL-NEXT: shrl $16, %edx
; ALL-NEXT: shrq $32, %r8
; ALL-NEXT: shrq $48, %r10
@@ -261,7 +261,7 @@
; ALL-NEXT: movq %rdi, %rax
; ALL-NEXT: movq %rdi, %rsi
; ALL-NEXT: movswl %di, %ecx
-; ALL-NEXT: # kill: def %edi killed %edi killed %rdi
+; ALL-NEXT: # kill: def $edi killed $edi killed $rdi
; ALL-NEXT: shrl $16, %edi
; ALL-NEXT: shrq $32, %rax
; ALL-NEXT: shrq $48, %rsi
@@ -314,7 +314,7 @@
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm9
; AVX1-NEXT: movswl %ax, %ecx
-; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm10
@@ -329,7 +329,7 @@
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm13
; AVX1-NEXT: movswl %ax, %ecx
-; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm14
@@ -344,7 +344,7 @@
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm3
; AVX1-NEXT: movswl %ax, %ecx
-; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm4
@@ -409,7 +409,7 @@
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm9
; AVX2-NEXT: movswl %ax, %ecx
-; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm10
@@ -424,7 +424,7 @@
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm13
; AVX2-NEXT: movswl %ax, %ecx
-; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm14
@@ -439,7 +439,7 @@
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm3
; AVX2-NEXT: movswl %ax, %ecx
-; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm4
@@ -504,7 +504,7 @@
; AVX512F-NEXT: movswl %cx, %ecx
; AVX512F-NEXT: vmovd %ecx, %xmm9
; AVX512F-NEXT: movswl %ax, %ecx
-; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512F-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: cwtl
; AVX512F-NEXT: vmovd %eax, %xmm11
@@ -519,7 +519,7 @@
; AVX512F-NEXT: movswl %cx, %ecx
; AVX512F-NEXT: vmovd %ecx, %xmm14
; AVX512F-NEXT: movswl %ax, %ecx
-; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512F-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: cwtl
; AVX512F-NEXT: vmovd %eax, %xmm15
@@ -534,7 +534,7 @@
; AVX512F-NEXT: movswl %cx, %ecx
; AVX512F-NEXT: vmovd %ecx, %xmm1
; AVX512F-NEXT: movswl %ax, %ecx
-; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512F-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: cwtl
; AVX512F-NEXT: vmovd %eax, %xmm4
@@ -600,7 +600,7 @@
; AVX512VL-NEXT: movswl %cx, %ecx
; AVX512VL-NEXT: vmovd %ecx, %xmm9
; AVX512VL-NEXT: movswl %ax, %ecx
-; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512VL-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: cwtl
; AVX512VL-NEXT: vmovd %eax, %xmm11
@@ -615,7 +615,7 @@
; AVX512VL-NEXT: movswl %cx, %ecx
; AVX512VL-NEXT: vmovd %ecx, %xmm14
; AVX512VL-NEXT: movswl %ax, %ecx
-; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512VL-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: cwtl
; AVX512VL-NEXT: vmovd %eax, %xmm15
@@ -630,7 +630,7 @@
; AVX512VL-NEXT: movswl %cx, %ecx
; AVX512VL-NEXT: vmovd %ecx, %xmm18
; AVX512VL-NEXT: movswl %ax, %ecx
-; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512VL-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: cwtl
; AVX512VL-NEXT: vmovd %eax, %xmm19
@@ -736,7 +736,7 @@
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: movswl %ax, %esi
-; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: shrq $48, %rdx
@@ -762,7 +762,7 @@
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: movswl %ax, %esi
-; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: shrq $48, %rdx
@@ -788,7 +788,7 @@
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movq %rax, %rdx
; AVX512F-NEXT: movswl %ax, %esi
-; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512F-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: shrq $32, %rcx
; AVX512F-NEXT: shrq $48, %rdx
@@ -816,7 +816,7 @@
; AVX512VL-NEXT: movq %rax, %rcx
; AVX512VL-NEXT: movq %rax, %rdx
; AVX512VL-NEXT: movswl %ax, %esi
-; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
+; AVX512VL-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: shrq $32, %rcx
; AVX512VL-NEXT: shrq $48, %rdx
@@ -2078,7 +2078,7 @@
; ALL: # %bb.0:
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %eax
-; ALL-NEXT: # kill: def %ax killed %ax killed %eax
+; ALL-NEXT: # kill: def $ax killed $ax killed $eax
; ALL-NEXT: retq
%1 = fptrunc float %a0 to half
%2 = bitcast half %1 to i16
@@ -2995,7 +2995,7 @@
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
@@ -3032,7 +3032,7 @@
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
@@ -3069,7 +3069,7 @@
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r14d
@@ -3111,7 +3111,7 @@
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
@@ -3149,7 +3149,7 @@
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
@@ -3187,7 +3187,7 @@
; AVX512F-NEXT: movl %eax, %ebx
; AVX512F-NEXT: shll $16, %ebx
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %r14d
@@ -3225,7 +3225,7 @@
; AVX512VL-NEXT: movl %eax, %ebx
; AVX512VL-NEXT: shll $16, %ebx
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %r14d
@@ -3269,7 +3269,7 @@
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
@@ -3307,7 +3307,7 @@
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
@@ -3345,7 +3345,7 @@
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r14d
@@ -3391,7 +3391,7 @@
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r15d
@@ -3416,7 +3416,7 @@
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r15d
@@ -3458,7 +3458,7 @@
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r15d
@@ -3483,7 +3483,7 @@
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r15d
@@ -3524,7 +3524,7 @@
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r15d
@@ -3552,7 +3552,7 @@
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r15d
@@ -3650,7 +3650,7 @@
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r15d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebp
@@ -3688,7 +3688,7 @@
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r15d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebp
@@ -3726,7 +3726,7 @@
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r15d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebp
@@ -3763,7 +3763,7 @@
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: shll $16, %ebp
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %ebx
@@ -3805,7 +3805,7 @@
; AVX2-NEXT: movl %eax, %ebp
; AVX2-NEXT: shll $16, %ebp
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %ebx
@@ -3847,7 +3847,7 @@
; AVX512F-NEXT: movl %eax, %ebp
; AVX512F-NEXT: shll $16, %ebp
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %ebx
@@ -3889,7 +3889,7 @@
; AVX512VL-NEXT: movl %eax, %ebp
; AVX512VL-NEXT: shll $16, %ebp
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %ebx
@@ -3938,7 +3938,7 @@
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: shll $16, %ebp
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %ebx
@@ -3980,7 +3980,7 @@
; AVX2-NEXT: movl %eax, %ebp
; AVX2-NEXT: shll $16, %ebp
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %ebx
@@ -4022,7 +4022,7 @@
; AVX512-NEXT: movl %eax, %ebp
; AVX512-NEXT: shll $16, %ebp
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %ebx
@@ -4092,7 +4092,7 @@
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r13d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebp
@@ -4100,7 +4100,7 @@
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r14d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r15d
@@ -4160,7 +4160,7 @@
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r13d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebp
@@ -4168,7 +4168,7 @@
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r14d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r15d
@@ -4230,7 +4230,7 @@
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r13d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebp
@@ -4238,7 +4238,7 @@
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r14d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r15d
diff --git a/llvm/test/CodeGen/X86/vector-lzcnt-128.ll b/llvm/test/CodeGen/X86/vector-lzcnt-128.ll
index 4abace0..dc945c8 100644
--- a/llvm/test/CodeGen/X86/vector-lzcnt-128.ll
+++ b/llvm/test/CodeGen/X86/vector-lzcnt-128.ll
@@ -233,9 +233,9 @@
;
; AVX512CD-LABEL: testv2i64:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512CD-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
@@ -499,9 +499,9 @@
;
; AVX512CD-LABEL: testv2i64u:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512CD-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
@@ -747,9 +747,9 @@
;
; AVX512CD-LABEL: testv4i32:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512CD-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
@@ -989,9 +989,9 @@
;
; AVX512CD-LABEL: testv4i32u:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512CD-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-lzcnt-256.ll b/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
index 73f7b3c..e943d73 100644
--- a/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
+++ b/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
@@ -162,9 +162,9 @@
;
; AVX512CD-LABEL: testv4i64:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv4i64:
@@ -354,9 +354,9 @@
;
; AVX512CD-LABEL: testv4i64u:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv4i64u:
@@ -521,9 +521,9 @@
;
; AVX512CD-LABEL: testv8i32:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv8i32:
@@ -683,9 +683,9 @@
;
; AVX512CD-LABEL: testv8i32u:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv8i32u:
diff --git a/llvm/test/CodeGen/X86/vector-popcnt-128.ll b/llvm/test/CodeGen/X86/vector-popcnt-128.ll
index e3cb8f5..df42ebf 100644
--- a/llvm/test/CodeGen/X86/vector-popcnt-128.ll
+++ b/llvm/test/CodeGen/X86/vector-popcnt-128.ll
@@ -115,9 +115,9 @@
;
; AVX512VPOPCNTDQ-LABEL: testv2i64:
; AVX512VPOPCNTDQ: # %bb.0:
-; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -290,9 +290,9 @@
;
; AVX512VPOPCNTDQ-LABEL: testv4i32:
; AVX512VPOPCNTDQ: # %bb.0:
-; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -456,7 +456,7 @@
; AVX512VPOPCNTDQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -470,9 +470,9 @@
;
; BITALG_NOVLX-LABEL: testv8i16:
; BITALG_NOVLX: # %bb.0:
-; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
@@ -595,9 +595,9 @@
;
; BITALG_NOVLX-LABEL: testv16i8:
; BITALG_NOVLX: # %bb.0:
-; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-popcnt-256.ll b/llvm/test/CodeGen/X86/vector-popcnt-256.ll
index 0f09b4f..b2cc2f1 100644
--- a/llvm/test/CodeGen/X86/vector-popcnt-256.ll
+++ b/llvm/test/CodeGen/X86/vector-popcnt-256.ll
@@ -46,9 +46,9 @@
;
; AVX512VPOPCNTDQ-LABEL: testv4i64:
; AVX512VPOPCNTDQ: # %bb.0:
-; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; AVX512VPOPCNTDQVL-LABEL: testv4i64:
@@ -139,9 +139,9 @@
;
; AVX512VPOPCNTDQ-LABEL: testv8i32:
; AVX512VPOPCNTDQ: # %bb.0:
-; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; AVX512VPOPCNTDQVL-LABEL: testv8i32:
@@ -246,9 +246,9 @@
;
; BITALG_NOVLX-LABEL: testv16i16:
; BITALG_NOVLX: # %bb.0:
-; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv16i16:
@@ -318,9 +318,9 @@
;
; BITALG_NOVLX-LABEL: testv32i8:
; BITALG_NOVLX: # %bb.0:
-; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv32i8:
diff --git a/llvm/test/CodeGen/X86/vector-rotate-128.ll b/llvm/test/CodeGen/X86/vector-rotate-128.ll
index 8af96c1..0b0f949 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-128.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-128.ll
@@ -78,10 +78,10 @@
;
; AVX512BW-LABEL: var_rotate_v2i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -219,10 +219,10 @@
;
; AVX512BW-LABEL: var_rotate_v4i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -462,8 +462,8 @@
;
; AVX512BW-LABEL: var_rotate_v8i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %xmm1, %xmm2, %xmm2
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
@@ -853,10 +853,10 @@
;
; AVX512BW-LABEL: constant_rotate_v2i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [4,14]
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -955,10 +955,10 @@
;
; AVX512BW-LABEL: constant_rotate_v4i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [4,5,6,7]
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1082,7 +1082,7 @@
;
; AVX512BW-LABEL: constant_rotate_v8i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [16,15,14,13,12,11,10,9]
@@ -1378,9 +1378,9 @@
;
; AVX512BW-LABEL: splatconstant_rotate_v2i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vprolq $14, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1425,9 +1425,9 @@
;
; AVX512BW-LABEL: splatconstant_rotate_v4i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1563,7 +1563,7 @@
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v2i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vprolq $15, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
@@ -1615,7 +1615,7 @@
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v4i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/vector-rotate-256.ll b/llvm/test/CodeGen/X86/vector-rotate-256.ll
index 46bac26..618a24b 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-256.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-256.ll
@@ -50,10 +50,10 @@
;
; AVX512BW-LABEL: var_rotate_v4i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v4i64:
@@ -141,10 +141,10 @@
;
; AVX512BW-LABEL: var_rotate_v8i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v8i32:
@@ -271,8 +271,8 @@
;
; AVX512BW-LABEL: var_rotate_v16i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %ymm1, %ymm2, %ymm2
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
@@ -479,10 +479,10 @@
;
; AVX512BW-LABEL: constant_rotate_v4i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [4,14,50,60]
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v4i64:
@@ -545,10 +545,10 @@
;
; AVX512BW-LABEL: constant_rotate_v8i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [4,5,6,7,8,9,10,11]
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v8i32:
@@ -623,7 +623,7 @@
;
; AVX512BW-LABEL: constant_rotate_v16i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
@@ -800,9 +800,9 @@
;
; AVX512BW-LABEL: splatconstant_rotate_v4i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vprolq $14, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v4i64:
@@ -853,9 +853,9 @@
;
; AVX512BW-LABEL: splatconstant_rotate_v8i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v8i32:
@@ -1012,7 +1012,7 @@
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v4i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vprolq $15, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: retq
@@ -1074,7 +1074,7 @@
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v8i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll
index 4e0cd76..8d803b6 100644
--- a/llvm/test/CodeGen/X86/vector-sext.ll
+++ b/llvm/test/CodeGen/X86/vector-sext.ll
@@ -1241,7 +1241,7 @@
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1250,7 +1250,7 @@
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1431,7 +1431,7 @@
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1440,7 +1440,7 @@
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1636,7 +1636,7 @@
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_sext_4i1_to_4i64:
@@ -1644,7 +1644,7 @@
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i1_to_4i64:
@@ -1994,7 +1994,7 @@
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -2003,7 +2003,7 @@
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2394,7 +2394,7 @@
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_sext_8i1_to_8i32:
@@ -2402,7 +2402,7 @@
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_8i1_to_8i32:
@@ -2912,7 +2912,7 @@
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: kmovw (%rdi), %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -3430,7 +3430,7 @@
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: kmovw (%rdi), %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_16i1_to_16i16:
@@ -4285,7 +4285,7 @@
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: kmovd (%rdi), %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_32i1_to_32i8:
@@ -5018,7 +5018,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: sext_32xi1_to_32xi8:
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
index 8aa8682..30616c4 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
@@ -82,10 +82,10 @@
;
; AVX512-LABEL: var_shift_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -336,16 +336,16 @@
; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512DQ-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v8i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -546,7 +546,7 @@
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -670,9 +670,9 @@
;
; AVX512-LABEL: splatvar_shift_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpsraq %xmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -984,7 +984,7 @@
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1127,10 +1127,10 @@
;
; AVX512-LABEL: constant_shift_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [1,7]
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -1305,16 +1305,16 @@
; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v8i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1495,7 +1495,7 @@
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1622,9 +1622,9 @@
;
; AVX512-LABEL: splatconstant_shift_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpsraq $7, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
index a99c70e..340cee3 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
@@ -75,10 +75,10 @@
;
; AVX512-LABEL: var_shift_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v4i64:
@@ -309,10 +309,10 @@
;
; AVX512BW-LABEL: var_shift_v16i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v16i16:
@@ -696,9 +696,9 @@
;
; AVX512-LABEL: splatvar_shift_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpsraq %xmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v4i64:
@@ -1170,10 +1170,10 @@
;
; AVX512-LABEL: constant_shift_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [1,7,31,62]
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v4i64:
@@ -1360,10 +1360,10 @@
;
; AVX512BW-LABEL: constant_shift_v16i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v16i16:
@@ -1702,9 +1702,9 @@
;
; AVX512-LABEL: splatconstant_shift_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpsraq $7, %zmm0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v4i64:
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
index 1e5dbea..fc0aa84c 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
@@ -290,7 +290,7 @@
; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -307,16 +307,16 @@
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQ-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v8i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -466,7 +466,7 @@
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -789,7 +789,7 @@
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1050,7 +1050,7 @@
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1066,16 +1066,16 @@
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQ-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v8i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1202,7 +1202,7 @@
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
index 0192c8a..b581088 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
@@ -272,10 +272,10 @@
;
; AVX512BW-LABEL: var_shift_v16i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v16i16:
@@ -1091,10 +1091,10 @@
;
; AVX512BW-LABEL: constant_shift_v16i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v16i16:
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
index 724fd34..e7526fd 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
@@ -247,7 +247,7 @@
; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -262,16 +262,16 @@
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQ-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v8i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -416,7 +416,7 @@
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -731,7 +731,7 @@
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -942,10 +942,10 @@
;
; AVX512BW-LABEL: constant_shift_v8i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1052,7 +1052,7 @@
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
index 0471388..4fe0844 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
@@ -232,10 +232,10 @@
;
; AVX512BW-LABEL: var_shift_v16i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v16i16:
@@ -966,10 +966,10 @@
;
; AVX512BW-LABEL: constant_shift_v16i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v16i16:
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
index 01a7226..8e4c192 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -1338,21 +1338,21 @@
define <4 x double> @insert_reg_and_zero_v4f64(double %a) {
; AVX1-LABEL: insert_reg_and_zero_v4f64:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_reg_and_zero_v4f64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: insert_reg_and_zero_v4f64:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512VL-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
index 28e35f5..8242019 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
@@ -689,7 +689,7 @@
define <16 x i32> @mask_shuffle_v4i32_v16i32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03(<4 x i32> %a) {
; ALL-LABEL: mask_shuffle_v4i32_v16i32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; ALL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; ALL-NEXT: retq
@@ -700,7 +700,7 @@
define <16 x float> @mask_shuffle_v4f32_v16f32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03(<4 x float> %a) {
; ALL-LABEL: mask_shuffle_v4f32_v16f32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; ALL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; ALL-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
index 826b630..eed07bc 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
@@ -2644,14 +2644,14 @@
define <8 x i64> @shuffle_v2i64_v8i64_01010101(<2 x i64> %a) {
; AVX512F-LABEL: shuffle_v2i64_v8i64_01010101:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v2i64_v8i64_01010101:
; AVX512F-32: # %bb.0:
-; AVX512F-32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512F-32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512F-32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -2662,14 +2662,14 @@
define <8 x double> @shuffle_v2f64_v8f64_01010101(<2 x double> %a) {
; AVX512F-LABEL: shuffle_v2f64_v8f64_01010101:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v2f64_v8f64_01010101:
; AVX512F-32: # %bb.0:
-; AVX512F-32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512F-32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512F-32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -2746,7 +2746,7 @@
; AVX512F-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX512F-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -2755,7 +2755,7 @@
; AVX512F-32-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512F-32-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX512F-32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512F-32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-32-NEXT: vzeroupper
; AVX512F-32-NEXT: retl
%res = shufflevector <8 x i64> %v, <8 x i64> undef, <2 x i32> <i32 2, i32 5>
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
index cd2dfbb..0ccd989 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
@@ -8,7 +8,7 @@
define <8 x float> @expand(<4 x float> %a) {
; SKX64-LABEL: expand:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; SKX64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; SKX64-NEXT: movb $5, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
@@ -23,7 +23,7 @@
;
; SKX32-LABEL: expand:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; SKX32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; SKX32-NEXT: movb $5, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
@@ -42,7 +42,7 @@
define <8 x float> @expand1(<4 x float> %a ) {
; SKX64-LABEL: expand1:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; SKX64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; SKX64-NEXT: movb $-86, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
@@ -50,7 +50,7 @@
;
; KNL64-LABEL: expand1:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; KNL64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; KNL64-NEXT: vmovaps {{.*#+}} ymm1 = <u,0,u,1,u,2,u,3>
; KNL64-NEXT: vpermps %ymm0, %ymm1, %ymm0
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -59,7 +59,7 @@
;
; SKX32-LABEL: expand1:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; SKX32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; SKX32-NEXT: movb $-86, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
@@ -67,7 +67,7 @@
;
; KNL32-LABEL: expand1:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; KNL32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; KNL32-NEXT: vmovaps {{.*#+}} ymm1 = <u,0,u,1,u,2,u,3>
; KNL32-NEXT: vpermps %ymm0, %ymm1, %ymm0
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -81,7 +81,7 @@
define <4 x double> @expand2(<2 x double> %a) {
; SKX64-LABEL: expand2:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; SKX64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; SKX64-NEXT: movb $9, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandpd %ymm0, %ymm0 {%k1} {z}
@@ -89,7 +89,7 @@
;
; KNL64-LABEL: expand2:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; KNL64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; KNL64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
@@ -97,7 +97,7 @@
;
; SKX32-LABEL: expand2:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; SKX32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; SKX32-NEXT: movb $9, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandpd %ymm0, %ymm0 {%k1} {z}
@@ -105,7 +105,7 @@
;
; KNL32-LABEL: expand2:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; KNL32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; KNL32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
@@ -118,7 +118,7 @@
define <8 x i32> @expand3(<4 x i32> %a ) {
; SKX64-LABEL: expand3:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; SKX64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; SKX64-NEXT: movb $-127, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z}
@@ -133,7 +133,7 @@
;
; SKX32-LABEL: expand3:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; SKX32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; SKX32-NEXT: movb $-127, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z}
@@ -153,7 +153,7 @@
define <4 x i64> @expand4(<2 x i64> %a ) {
; SKX64-LABEL: expand4:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; SKX64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; SKX64-NEXT: movb $9, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpexpandq %ymm0, %ymm0 {%k1} {z}
@@ -161,7 +161,7 @@
;
; KNL64-LABEL: expand4:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; KNL64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; KNL64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
@@ -169,7 +169,7 @@
;
; SKX32-LABEL: expand4:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; SKX32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; SKX32-NEXT: movb $9, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpexpandq %ymm0, %ymm0 {%k1} {z}
@@ -177,7 +177,7 @@
;
; KNL32-LABEL: expand4:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; KNL32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; KNL32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
@@ -251,7 +251,7 @@
define <16 x float> @expand7(<8 x float> %a) {
; SKX64-LABEL: expand7:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX64-NEXT: movw $1285, %ax # imm = 0x505
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -259,7 +259,7 @@
;
; KNL64-LABEL: expand7:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL64-NEXT: movw $1285, %ax # imm = 0x505
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -267,7 +267,7 @@
;
; SKX32-LABEL: expand7:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX32-NEXT: movw $1285, %ax # imm = 0x505
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -275,7 +275,7 @@
;
; KNL32-LABEL: expand7:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL32-NEXT: movw $1285, %ax # imm = 0x505
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -287,7 +287,7 @@
define <16 x float> @expand8(<8 x float> %a ) {
; SKX64-LABEL: expand8:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -295,7 +295,7 @@
;
; KNL64-LABEL: expand8:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -303,7 +303,7 @@
;
; SKX32-LABEL: expand8:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -311,7 +311,7 @@
;
; KNL32-LABEL: expand8:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -324,7 +324,7 @@
define <8 x double> @expand9(<4 x double> %a) {
; SKX64-LABEL: expand9:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX64-NEXT: movb $-127, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
@@ -332,7 +332,7 @@
;
; KNL64-LABEL: expand9:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL64-NEXT: movb $-127, %al
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
@@ -340,7 +340,7 @@
;
; SKX32-LABEL: expand9:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX32-NEXT: movb $-127, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
@@ -348,7 +348,7 @@
;
; KNL32-LABEL: expand9:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL32-NEXT: movb $-127, %al
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
@@ -360,7 +360,7 @@
define <16 x i32> @expand10(<8 x i32> %a ) {
; SKX64-LABEL: expand10:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
@@ -368,7 +368,7 @@
;
; KNL64-LABEL: expand10:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
@@ -376,7 +376,7 @@
;
; SKX32-LABEL: expand10:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
@@ -384,7 +384,7 @@
;
; KNL32-LABEL: expand10:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
@@ -396,7 +396,7 @@
define <8 x i64> @expand11(<4 x i64> %a) {
; SKX64-LABEL: expand11:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX64-NEXT: movb $-127, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
@@ -404,7 +404,7 @@
;
; KNL64-LABEL: expand11:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL64-NEXT: movb $-127, %al
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
@@ -412,7 +412,7 @@
;
; SKX32-LABEL: expand11:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX32-NEXT: movb $-127, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
@@ -420,7 +420,7 @@
;
; KNL32-LABEL: expand11:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL32-NEXT: movb $-127, %al
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
@@ -433,7 +433,7 @@
define <16 x float> @expand12(<8 x float> %a) {
; SKX64-LABEL: expand12:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX64-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; SKX64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; SKX64-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
@@ -442,7 +442,7 @@
;
; KNL64-LABEL: expand12:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL64-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL64-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL64-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
@@ -451,7 +451,7 @@
;
; SKX32-LABEL: expand12:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; SKX32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; SKX32-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; SKX32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; SKX32-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
@@ -460,7 +460,7 @@
;
; KNL32-LABEL: expand12:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL32-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL32-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
@@ -503,7 +503,7 @@
define <8 x float> @expand14(<4 x float> %a) {
; SKX64-LABEL: expand14:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; SKX64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; SKX64-NEXT: movb $20, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
@@ -520,7 +520,7 @@
;
; SKX32-LABEL: expand14:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; SKX32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; SKX32-NEXT: movb $20, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
index 7274349..c157233 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -196,13 +196,13 @@
define <32 x i8> @combine_pshufb_as_vpbroadcastb256(<2 x i64> %a) {
; X32-LABEL: combine_pshufb_as_vpbroadcastb256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vpbroadcastb %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vpbroadcastb256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vpbroadcastb %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x i64> %a, <2 x i64> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
@@ -231,13 +231,13 @@
define <32 x i8> @combine_pshufb_as_vpbroadcastw256(<2 x i64> %a) {
; X32-LABEL: combine_pshufb_as_vpbroadcastw256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vpbroadcastw %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vpbroadcastw256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vpbroadcastw %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x i64> %a, <2 x i64> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
@@ -269,14 +269,14 @@
define <8 x i32> @combine_permd_as_vpbroadcastd256(<4 x i32> %a) {
; X32-LABEL: combine_permd_as_vpbroadcastd256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vpbroadcastd %xmm0, %ymm0
; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permd_as_vpbroadcastd256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vpbroadcastd %xmm0, %ymm0
; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -303,14 +303,14 @@
define <8 x i32> @combine_permd_as_vpbroadcastq256(<4 x i32> %a) {
; X32-LABEL: combine_permd_as_vpbroadcastq256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vpbroadcastq %xmm0, %ymm0
; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permd_as_vpbroadcastq256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vpbroadcastq %xmm0, %ymm0
; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -339,13 +339,13 @@
define <8 x float> @combine_permps_as_vpbroadcastss256(<4 x float> %a) {
; X32-LABEL: combine_permps_as_vpbroadcastss256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vbroadcastss %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permps_as_vpbroadcastss256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -356,13 +356,13 @@
define <4 x double> @combine_permps_as_vpbroadcastsd256(<2 x double> %a) {
; X32-LABEL: combine_permps_as_vpbroadcastsd256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permps_as_vpbroadcastsd256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x double> %a, <2 x double> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-v1.ll b/llvm/test/CodeGen/X86/vector-shuffle-v1.ll
index 66bd70e..901d83c 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-v1.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-v1.ll
@@ -12,7 +12,7 @@
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -51,7 +51,7 @@
; AVX512F-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -93,7 +93,7 @@
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -131,7 +131,7 @@
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -487,7 +487,7 @@
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -531,7 +531,7 @@
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -547,7 +547,7 @@
; AVX512VL-NEXT: vpslld $31, %ymm2, %ymm0
; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -561,7 +561,7 @@
; VL_BW_DQ-NEXT: vpermi2d %ymm1, %ymm0, %ymm2
; VL_BW_DQ-NEXT: vpmovd2m %ymm2, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
+; VL_BW_DQ-NEXT: # kill: def $al killed $al killed $eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -579,7 +579,7 @@
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -592,7 +592,7 @@
; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -603,7 +603,7 @@
; VL_BW_DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; VL_BW_DQ-NEXT: vpmovd2m %ymm0, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
+; VL_BW_DQ-NEXT: # kill: def $al killed $al killed $eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -622,7 +622,7 @@
; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512F-NEXT: vptestmq %zmm2, %zmm2, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -636,7 +636,7 @@
; AVX512VL-NEXT: vpermi2d %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vptestmd %ymm2, %ymm2, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -649,7 +649,7 @@
; VL_BW_DQ-NEXT: vpermi2d %ymm1, %ymm0, %ymm2
; VL_BW_DQ-NEXT: vpmovd2m %ymm2, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
+; VL_BW_DQ-NEXT: # kill: def $al killed $al killed $eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -668,7 +668,7 @@
; AVX512F-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vptestmq %zmm2, %zmm2, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -682,7 +682,7 @@
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4,5,6,7]
; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -695,7 +695,7 @@
; VL_BW_DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4,5,6,7]
; VL_BW_DQ-NEXT: vpmovd2m %ymm0, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
+; VL_BW_DQ-NEXT: # kill: def $al killed $al killed $eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -714,7 +714,7 @@
; AVX512F-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vptestmq %zmm2, %zmm2, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -727,7 +727,7 @@
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],mem[1,2,3,4,5,6,7]
; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -739,7 +739,7 @@
; VL_BW_DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],mem[1,2,3,4,5,6,7]
; VL_BW_DQ-NEXT: vpmovd2m %ymm0, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
+; VL_BW_DQ-NEXT: # kill: def $al killed $al killed $eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -760,7 +760,7 @@
; AVX512F-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vptestmq %zmm2, %zmm2, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %al killed %al killed %eax
+; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -775,7 +775,7 @@
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -789,7 +789,7 @@
; VL_BW_DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; VL_BW_DQ-NEXT: vpmovd2m %ymm0, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
+; VL_BW_DQ-NEXT: # kill: def $al killed $al killed $eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%c = shufflevector <8 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, <8 x i1> %a, <8 x i32> <i32 9, i32 6, i32 1, i32 0, i32 3, i32 7, i32 7, i32 0>
@@ -806,7 +806,7 @@
; AVX512F-NEXT: vpbroadcastd %xmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -817,7 +817,7 @@
; AVX512VL-NEXT: vpbroadcastd %xmm0, %zmm0
; AVX512VL-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX512VL-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -828,7 +828,7 @@
; VL_BW_DQ-NEXT: vpbroadcastd %xmm0, %zmm0
; VL_BW_DQ-NEXT: vpmovd2m %zmm0, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: def %ax killed %ax killed %eax
+; VL_BW_DQ-NEXT: # kill: def $ax killed $ax killed $eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i16 %a to <16 x i1>
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-variable-128.ll b/llvm/test/CodeGen/X86/vector-shuffle-variable-128.ll
index 4de24d5..08d67a2 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-variable-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-variable-128.ll
@@ -37,8 +37,8 @@
define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1) nounwind {
; SSE-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
; SSE: # %bb.0:
-; SSE-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: # kill: def $edi killed $edi def $rdi
; SSE-NEXT: andl $1, %edi
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $1, %esi
@@ -49,8 +49,8 @@
;
; AVX-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $1, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $1, %esi
@@ -68,10 +68,10 @@
define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE2-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE2-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE2-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE2-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE2-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: andl $3, %edi
; SSE2-NEXT: andl $3, %esi
; SSE2-NEXT: andl $3, %edx
@@ -88,10 +88,10 @@
;
; SSSE3-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSSE3-NEXT: # kill: def %edx killed %edx def %rdx
-; SSSE3-NEXT: # kill: def %esi killed %esi def %rsi
-; SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSSE3-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSSE3-NEXT: # kill: def $edx killed $edx def $rdx
+; SSSE3-NEXT: # kill: def $esi killed $esi def $rsi
+; SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSSE3-NEXT: andl $3, %edi
; SSSE3-NEXT: andl $3, %esi
; SSSE3-NEXT: andl $3, %edx
@@ -108,10 +108,10 @@
;
; SSE41-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE41-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE41-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE41-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE41-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE41-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE41-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE41-NEXT: # kill: def $edi killed $edi def $rdi
; SSE41-NEXT: andl $3, %edi
; SSE41-NEXT: andl $3, %esi
; SSE41-NEXT: andl $3, %edx
@@ -125,10 +125,10 @@
;
; AVX-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $3, %edi
; AVX-NEXT: andl $3, %esi
; AVX-NEXT: andl $3, %edx
@@ -153,10 +153,10 @@
define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE2-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE2-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE2-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE2-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE2-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: andl $3, %edi
; SSE2-NEXT: andl $3, %esi
; SSE2-NEXT: andl $3, %edx
@@ -173,10 +173,10 @@
;
; SSSE3-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSSE3-NEXT: # kill: def %edx killed %edx def %rdx
-; SSSE3-NEXT: # kill: def %esi killed %esi def %rsi
-; SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSSE3-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSSE3-NEXT: # kill: def $edx killed $edx def $rdx
+; SSSE3-NEXT: # kill: def $esi killed $esi def $rsi
+; SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSSE3-NEXT: andl $3, %edi
; SSSE3-NEXT: andl $3, %esi
; SSSE3-NEXT: andl $3, %edx
@@ -193,10 +193,10 @@
;
; SSE41-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE41-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE41-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE41-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE41-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE41-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE41-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE41-NEXT: # kill: def $edi killed $edi def $rdi
; SSE41-NEXT: andl $3, %edi
; SSE41-NEXT: andl $3, %esi
; SSE41-NEXT: andl $3, %edx
@@ -210,10 +210,10 @@
;
; AVX-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $3, %edi
; AVX-NEXT: andl $3, %esi
; AVX-NEXT: andl $3, %edx
@@ -238,12 +238,12 @@
define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
; SSE2-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSE2: # %bb.0:
-; SSE2-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSE2-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSE2-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE2-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE2-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE2-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSE2-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSE2-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE2-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: andl $7, %edi
; SSE2-NEXT: andl $7, %esi
; SSE2-NEXT: andl $7, %edx
@@ -282,12 +282,12 @@
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSSE3-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSSE3-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSSE3-NEXT: # kill: def %edx killed %edx def %rdx
-; SSSE3-NEXT: # kill: def %esi killed %esi def %rsi
-; SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSSE3-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSSE3-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSSE3-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSSE3-NEXT: # kill: def $edx killed $edx def $rdx
+; SSSE3-NEXT: # kill: def $esi killed $esi def $rsi
+; SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSSE3-NEXT: andl $7, %edi
; SSSE3-NEXT: andl $7, %esi
; SSSE3-NEXT: andl $7, %edx
@@ -326,12 +326,12 @@
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSE41: # %bb.0:
-; SSE41-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSE41-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSE41-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE41-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE41-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE41-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE41-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSE41-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSE41-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE41-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE41-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE41-NEXT: # kill: def $edi killed $edi def $rdi
; SSE41-NEXT: andl $7, %edi
; SSE41-NEXT: andl $7, %esi
; SSE41-NEXT: andl $7, %edx
@@ -356,12 +356,12 @@
;
; AVX-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %r9d killed %r9d def %r9
-; AVX-NEXT: # kill: def %r8d killed %r8d def %r8
-; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $r9d killed $r9d def $r9
+; AVX-NEXT: # kill: def $r8d killed $r8d def $r8
+; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $7, %edi
; AVX-NEXT: andl $7, %esi
; AVX-NEXT: andl $7, %edx
@@ -405,12 +405,12 @@
define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %i0, i8 %i1, i8 %i2, i8 %i3, i8 %i4, i8 %i5, i8 %i6, i8 %i7, i8 %i8, i8 %i9, i8 %i10, i8 %i11, i8 %i12, i8 %i13, i8 %i14, i8 %i15) nounwind {
; SSE2-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSE2-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSE2-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE2-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE2-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE2-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSE2-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSE2-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE2-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
@@ -489,12 +489,12 @@
;
; SSSE3-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSSE3-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSSE3-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSSE3-NEXT: # kill: def %edx killed %edx def %rdx
-; SSSE3-NEXT: # kill: def %esi killed %esi def %rsi
-; SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSSE3-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSSE3-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSSE3-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSSE3-NEXT: # kill: def $edx killed $edx def $rdx
+; SSSE3-NEXT: # kill: def $esi killed $esi def $rsi
+; SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
@@ -573,12 +573,12 @@
;
; SSE41-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE41: # %bb.0:
-; SSE41-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSE41-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSE41-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE41-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE41-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE41-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE41-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSE41-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSE41-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE41-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE41-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE41-NEXT: # kill: def $edi killed $edi def $rdi
; SSE41-NEXT: andl $15, %edi
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movzbl -24(%rsp,%rdi), %eax
@@ -627,12 +627,12 @@
;
; AVX-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %r9d killed %r9d def %r9
-; AVX-NEXT: # kill: def %r8d killed %r8d def %r8
-; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $r9d killed $r9d def $r9
+; AVX-NEXT: # kill: def $r8d killed $r8d def $r8
+; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $15, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movzbl -24(%rsp,%rdi), %eax
@@ -1160,9 +1160,9 @@
define <4 x float> @var_shuffle_v4f32_v4f32_x0yx_i32(<4 x float> %x, <4 x float> %y, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
; SSE: # %bb.0:
-; SSE-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE-NEXT: # kill: def $edi killed $edi def $rdi
; SSE-NEXT: andl $3, %edi
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $3, %edx
@@ -1177,9 +1177,9 @@
;
; AVX-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $3, %edi
; AVX-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $3, %edx
@@ -1205,12 +1205,12 @@
define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %y, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
; SSE2-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSE2: # %bb.0:
-; SSE2-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSE2-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSE2-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE2-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE2-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE2-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE2-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSE2-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSE2-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE2-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: andl $7, %edi
; SSE2-NEXT: andl $7, %esi
; SSE2-NEXT: andl $7, %edx
@@ -1242,12 +1242,12 @@
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSSE3-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSSE3-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSSE3-NEXT: # kill: def %edx killed %edx def %rdx
-; SSSE3-NEXT: # kill: def %esi killed %esi def %rsi
-; SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
+; SSSE3-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSSE3-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSSE3-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSSE3-NEXT: # kill: def $edx killed $edx def $rdx
+; SSSE3-NEXT: # kill: def $esi killed $esi def $rsi
+; SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSSE3-NEXT: andl $7, %edi
; SSSE3-NEXT: andl $7, %esi
; SSSE3-NEXT: andl $7, %edx
@@ -1279,12 +1279,12 @@
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSE41: # %bb.0:
-; SSE41-NEXT: # kill: def %r9d killed %r9d def %r9
-; SSE41-NEXT: # kill: def %r8d killed %r8d def %r8
-; SSE41-NEXT: # kill: def %ecx killed %ecx def %rcx
-; SSE41-NEXT: # kill: def %edx killed %edx def %rdx
-; SSE41-NEXT: # kill: def %esi killed %esi def %rsi
-; SSE41-NEXT: # kill: def %edi killed %edi def %rdi
+; SSE41-NEXT: # kill: def $r9d killed $r9d def $r9
+; SSE41-NEXT: # kill: def $r8d killed $r8d def $r8
+; SSE41-NEXT: # kill: def $ecx killed $ecx def $rcx
+; SSE41-NEXT: # kill: def $edx killed $edx def $rdx
+; SSE41-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE41-NEXT: # kill: def $edi killed $edi def $rdi
; SSE41-NEXT: andl $7, %edi
; SSE41-NEXT: andl $7, %esi
; SSE41-NEXT: andl $7, %edx
@@ -1304,12 +1304,12 @@
;
; AVX-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: def %r9d killed %r9d def %r9
-; AVX-NEXT: # kill: def %r8d killed %r8d def %r8
-; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX-NEXT: # kill: def $r9d killed $r9d def $r9
+; AVX-NEXT: # kill: def $r8d killed $r8d def $r8
+; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $7, %edi
; AVX-NEXT: andl $7, %esi
; AVX-NEXT: andl $7, %edx
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll b/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll
index 91672d07..c726a14 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll
@@ -185,12 +185,12 @@
; ALL-NEXT: movq %rsp, %rbp
; ALL-NEXT: andq $-32, %rsp
; ALL-NEXT: subq $64, %rsp
-; ALL-NEXT: # kill: def %r9d killed %r9d def %r9
-; ALL-NEXT: # kill: def %r8d killed %r8d def %r8
-; ALL-NEXT: # kill: def %ecx killed %ecx def %rcx
-; ALL-NEXT: # kill: def %edx killed %edx def %rdx
-; ALL-NEXT: # kill: def %esi killed %esi def %rsi
-; ALL-NEXT: # kill: def %edi killed %edi def %rdi
+; ALL-NEXT: # kill: def $r9d killed $r9d def $r9
+; ALL-NEXT: # kill: def $r8d killed $r8d def $r8
+; ALL-NEXT: # kill: def $ecx killed $ecx def $rcx
+; ALL-NEXT: # kill: def $edx killed $edx def $rdx
+; ALL-NEXT: # kill: def $esi killed $esi def $rsi
+; ALL-NEXT: # kill: def $edi killed $edi def $rdi
; ALL-NEXT: andl $7, %edi
; ALL-NEXT: andl $7, %esi
; ALL-NEXT: andl $7, %edx
@@ -236,12 +236,12 @@
define <8 x float> @var_shuffle_v8f32_v4f32_xxxxxxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7) nounwind {
; ALL-LABEL: var_shuffle_v8f32_v4f32_xxxxxxxx_i32:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: def %r9d killed %r9d def %r9
-; ALL-NEXT: # kill: def %r8d killed %r8d def %r8
-; ALL-NEXT: # kill: def %ecx killed %ecx def %rcx
-; ALL-NEXT: # kill: def %edx killed %edx def %rdx
-; ALL-NEXT: # kill: def %esi killed %esi def %rsi
-; ALL-NEXT: # kill: def %edi killed %edi def %rdi
+; ALL-NEXT: # kill: def $r9d killed $r9d def $r9
+; ALL-NEXT: # kill: def $r8d killed $r8d def $r8
+; ALL-NEXT: # kill: def $ecx killed $ecx def $rcx
+; ALL-NEXT: # kill: def $edx killed $edx def $rdx
+; ALL-NEXT: # kill: def $esi killed $esi def $rsi
+; ALL-NEXT: # kill: def $edi killed $edi def $rdi
; ALL-NEXT: andl $3, %edi
; ALL-NEXT: andl $3, %esi
; ALL-NEXT: andl $3, %edx
@@ -289,12 +289,12 @@
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $64, %rsp
-; AVX1-NEXT: # kill: def %r9d killed %r9d def %r9
-; AVX1-NEXT: # kill: def %r8d killed %r8d def %r8
-; AVX1-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX1-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX1-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX1-NEXT: # kill: def $r9d killed $r9d def $r9
+; AVX1-NEXT: # kill: def $r8d killed $r8d def $r8
+; AVX1-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX1-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX1-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1-NEXT: # kill: def $edi killed $edi def $rdi
; AVX1-NEXT: andl $15, %edi
; AVX1-NEXT: vmovaps %ymm0, (%rsp)
; AVX1-NEXT: movzwl (%rsp,%rdi,2), %eax
@@ -351,12 +351,12 @@
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $64, %rsp
-; AVX2-NEXT: # kill: def %r9d killed %r9d def %r9
-; AVX2-NEXT: # kill: def %r8d killed %r8d def %r8
-; AVX2-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX2-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX2-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX2-NEXT: # kill: def $r9d killed $r9d def $r9
+; AVX2-NEXT: # kill: def $r8d killed $r8d def $r8
+; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX2-NEXT: # kill: def $edi killed $edi def $rdi
; AVX2-NEXT: andl $15, %edi
; AVX2-NEXT: vmovaps %ymm0, (%rsp)
; AVX2-NEXT: movzwl (%rsp,%rdi,2), %eax
@@ -444,12 +444,12 @@
define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i32 %i10, i32 %i11, i32 %i12, i32 %i13, i32 %i14, i32 %i15) nounwind {
; AVX1-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: def %r9d killed %r9d def %r9
-; AVX1-NEXT: # kill: def %r8d killed %r8d def %r8
-; AVX1-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX1-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX1-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX1-NEXT: # kill: def $r9d killed $r9d def $r9
+; AVX1-NEXT: # kill: def $r8d killed $r8d def $r8
+; AVX1-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX1-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX1-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1-NEXT: # kill: def $edi killed $edi def $rdi
; AVX1-NEXT: andl $7, %edi
; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: movzwl -24(%rsp,%rdi,2), %eax
@@ -500,12 +500,12 @@
;
; AVX2-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: def %r9d killed %r9d def %r9
-; AVX2-NEXT: # kill: def %r8d killed %r8d def %r8
-; AVX2-NEXT: # kill: def %ecx killed %ecx def %rcx
-; AVX2-NEXT: # kill: def %edx killed %edx def %rdx
-; AVX2-NEXT: # kill: def %esi killed %esi def %rsi
-; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
+; AVX2-NEXT: # kill: def $r9d killed $r9d def $r9
+; AVX2-NEXT: # kill: def $r8d killed $r8d def $r8
+; AVX2-NEXT: # kill: def $ecx killed $ecx def $rcx
+; AVX2-NEXT: # kill: def $edx killed $edx def $rdx
+; AVX2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX2-NEXT: # kill: def $edi killed $edi def $rdi
; AVX2-NEXT: andl $7, %edi
; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: movzwl -24(%rsp,%rdi,2), %eax
diff --git a/llvm/test/CodeGen/X86/vector-trunc-math.ll b/llvm/test/CodeGen/X86/vector-trunc-math.ll
index 12b17d6..80629a3 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-math.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-math.ll
@@ -34,7 +34,7 @@
; AVX2-SLOW-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -43,7 +43,7 @@
; AVX2-FAST-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -51,7 +51,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <4 x i64> %a0, %a1
@@ -111,7 +111,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -125,7 +125,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -170,7 +170,7 @@
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -178,7 +178,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <8 x i32> %a0, %a1
@@ -432,7 +432,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -484,7 +484,7 @@
;
; AVX512-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512-NEXT: vpmovdw %zmm1, %ymm1
; AVX512-NEXT: vpmovsxbw %xmm0, %xmm0
; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
@@ -534,7 +534,7 @@
;
; AVX512-LABEL: trunc_add_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -645,7 +645,7 @@
;
; AVX512-LABEL: trunc_add_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -862,7 +862,7 @@
;
; AVX512BW-LABEL: trunc_add_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
@@ -907,7 +907,7 @@
; AVX2-SLOW-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -916,7 +916,7 @@
; AVX2-FAST-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -924,7 +924,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <4 x i64> %a0, %a1
@@ -984,7 +984,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -998,7 +998,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -1043,7 +1043,7 @@
; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1051,7 +1051,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i32> %a0, %a1
@@ -1305,7 +1305,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1353,7 +1353,7 @@
; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -1362,7 +1362,7 @@
; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -1370,7 +1370,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
@@ -1435,7 +1435,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -1449,7 +1449,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -1493,7 +1493,7 @@
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1501,7 +1501,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1754,7 +1754,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1831,8 +1831,8 @@
;
; AVX512F-LABEL: trunc_mul_v4i64_v4i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpmulld %xmm1, %xmm0, %xmm0
@@ -1841,8 +1841,8 @@
;
; AVX512BW-LABEL: trunc_mul_v4i64_v4i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpmulld %xmm1, %xmm0, %xmm0
@@ -1851,11 +1851,11 @@
;
; AVX512DQ-LABEL: trunc_mul_v4i64_v4i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = mul <4 x i64> %a0, %a1
@@ -2024,7 +2024,7 @@
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2032,7 +2032,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = mul <8 x i32> %a0, %a1
@@ -2484,7 +2484,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2536,7 +2536,7 @@
;
; AVX512-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512-NEXT: vpmovdw %zmm1, %ymm1
; AVX512-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
@@ -2601,7 +2601,7 @@
;
; AVX512-LABEL: trunc_mul_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -2712,7 +2712,7 @@
;
; AVX512-LABEL: trunc_mul_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -3081,7 +3081,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -3122,7 +3122,7 @@
; AVX2-SLOW-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -3131,7 +3131,7 @@
; AVX2-FAST-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -3139,7 +3139,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <4 x i64> %a0, %a1
@@ -3195,7 +3195,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -3209,7 +3209,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -3252,7 +3252,7 @@
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -3260,7 +3260,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <8 x i32> %a0, %a1
@@ -3500,7 +3500,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -3553,7 +3553,7 @@
;
; AVX512-LABEL: trunc_and_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -3664,7 +3664,7 @@
;
; AVX512-LABEL: trunc_and_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -3881,7 +3881,7 @@
;
; AVX512BW-LABEL: trunc_and_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
@@ -3924,7 +3924,7 @@
; AVX2-SLOW-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -3933,7 +3933,7 @@
; AVX2-FAST-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -3941,7 +3941,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <4 x i64> %a0, %a1
@@ -3997,7 +3997,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -4011,7 +4011,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -4054,7 +4054,7 @@
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -4062,7 +4062,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <8 x i32> %a0, %a1
@@ -4302,7 +4302,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -4355,7 +4355,7 @@
;
; AVX512-LABEL: trunc_xor_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4466,7 +4466,7 @@
;
; AVX512-LABEL: trunc_xor_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4683,7 +4683,7 @@
;
; AVX512BW-LABEL: trunc_xor_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
@@ -4726,7 +4726,7 @@
; AVX2-SLOW-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -4735,7 +4735,7 @@
; AVX2-FAST-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -4743,7 +4743,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <4 x i64> %a0, %a1
@@ -4799,7 +4799,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -4813,7 +4813,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -4856,7 +4856,7 @@
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -4864,7 +4864,7 @@
; AVX512: # %bb.0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <8 x i32> %a0, %a1
@@ -5104,7 +5104,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -5157,7 +5157,7 @@
;
; AVX512-LABEL: trunc_or_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -5268,7 +5268,7 @@
;
; AVX512-LABEL: trunc_or_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -5485,7 +5485,7 @@
;
; AVX512BW-LABEL: trunc_or_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/vector-trunc-packus.ll b/llvm/test/CodeGen/X86/vector-trunc-packus.ll
index 912e554..b07c0b2 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-packus.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-packus.ll
@@ -210,7 +210,7 @@
; AVX2-SLOW-NEXT: vpand %ymm0, %ymm1, %ymm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -224,19 +224,19 @@
; AVX2-FAST-NEXT: vpand %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX512F-LABEL: trunc_packus_v4i64_v4i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -251,13 +251,13 @@
;
; AVX512BW-LABEL: trunc_packus_v4i64_v4i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
; AVX512BW-NEXT: vpminsq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1072,7 +1072,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -1094,7 +1094,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -1206,7 +1206,7 @@
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1217,7 +1217,7 @@
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1237,7 +1237,7 @@
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1826,7 +1826,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -1848,7 +1848,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -2809,7 +2809,7 @@
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2820,7 +2820,7 @@
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -2840,7 +2840,7 @@
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -3131,7 +3131,7 @@
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-trunc-ssat.ll b/llvm/test/CodeGen/X86/vector-trunc-ssat.ll
index 7cd448d..bfab987 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-ssat.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-ssat.ll
@@ -226,7 +226,7 @@
; AVX2-SLOW-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -240,19 +240,19 @@
; AVX2-FAST-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT: vmovapd {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX512F-LABEL: trunc_ssat_v4i64_v4i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm1 = [2147483647,2147483647,2147483647,2147483647]
; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm1 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -264,13 +264,13 @@
;
; AVX512BW-LABEL: trunc_ssat_v4i64_v4i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [2147483647,2147483647,2147483647,2147483647]
; AVX512BW-NEXT: vpminsq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1138,7 +1138,7 @@
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -1160,7 +1160,7 @@
; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -1277,7 +1277,7 @@
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1288,7 +1288,7 @@
; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528]
; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1305,7 +1305,7 @@
; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528]
; AVX512BW-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1936,7 +1936,7 @@
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -1958,7 +1958,7 @@
; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -2990,7 +2990,7 @@
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -3001,7 +3001,7 @@
; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168]
; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -3020,7 +3020,7 @@
; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168]
; AVX512BW-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -3322,7 +3322,7 @@
; AVX512BW-NEXT: vpminsw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmaxsw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-trunc-usat.ll b/llvm/test/CodeGen/X86/vector-trunc-usat.ll
index 2437c08..511f0c4 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-usat.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-usat.ll
@@ -139,7 +139,7 @@
; AVX2-SLOW-NEXT: vblendvpd %ymm1, %ymm0, %ymm2, %ymm0
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -153,19 +153,19 @@
; AVX2-FAST-NEXT: vblendvpd %ymm1, %ymm0, %ymm2, %ymm0
; AVX2-FAST-NEXT: vmovapd {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX512F-LABEL: trunc_usat_v4i64_v4i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
; AVX512F-NEXT: vpcmpltuq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,429496729]
; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512F-NEXT: vpmovqd %zmm1, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -180,13 +180,13 @@
;
; AVX512BW-LABEL: trunc_usat_v4i64_v4i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
; AVX512BW-NEXT: vpcmpltuq %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,429496729]
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -718,7 +718,7 @@
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -739,7 +739,7 @@
; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -834,7 +834,7 @@
; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65535,65535,65535,65535,65535,65535,65535,65535]
; AVX512F-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -849,7 +849,7 @@
; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65535,65535,65535,65535,65535,65535,65535,65535]
; AVX512BW-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1257,7 +1257,7 @@
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -1278,7 +1278,7 @@
; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -1927,7 +1927,7 @@
; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1943,7 +1943,7 @@
; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2184,7 +2184,7 @@
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpminuw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-trunc.ll b/llvm/test/CodeGen/X86/vector-trunc.ll
index 6214898..ffe84d2 100644
--- a/llvm/test/CodeGen/X86/vector-trunc.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc.ll
@@ -264,7 +264,7 @@
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -276,7 +276,7 @@
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
@@ -400,15 +400,15 @@
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc8i32_8i16:
; AVX512F: # %bb.0: # %entry
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -420,9 +420,9 @@
;
; AVX512BW-LABEL: trunc8i32_8i16:
; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -465,7 +465,7 @@
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -480,7 +480,7 @@
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -544,7 +544,7 @@
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -559,7 +559,7 @@
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -626,7 +626,7 @@
;
; AVX512F-LABEL: trunc8i32_8i8:
; AVX512F: # %bb.0: # %entry
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovq %xmm0, (%rax)
@@ -641,7 +641,7 @@
;
; AVX512BW-LABEL: trunc8i32_8i8:
; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovq %xmm0, (%rax)
@@ -1139,7 +1139,7 @@
;
; AVX512BW-LABEL: trunc16i16_16i8:
; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqu %xmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
@@ -1433,8 +1433,8 @@
;
; AVX512F-LABEL: trunc2x4i64_8i32:
; AVX512F: # %bb.0: # %entry
-; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
@@ -1449,8 +1449,8 @@
;
; AVX512BW-LABEL: trunc2x4i64_8i32:
; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
@@ -1555,8 +1555,8 @@
;
; AVX512F-LABEL: trunc2x4i64_8i16:
; AVX512F: # %bb.0: # %entry
-; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -1579,8 +1579,8 @@
;
; AVX512BW-LABEL: trunc2x4i64_8i16:
; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -1967,7 +1967,7 @@
;
; AVX512F-LABEL: PR32160:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,2,3,4,5,6,7]
; AVX512F-NEXT: vpbroadcastd %xmm0, %xmm0
@@ -1983,7 +1983,7 @@
;
; AVX512BW-LABEL: PR32160:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,4,5,4,5,4,5,4,5,4,5]
; AVX512BW-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/vector-tzcnt-128.ll b/llvm/test/CodeGen/X86/vector-tzcnt-128.ll
index dfb0ade..d19c10d 100644
--- a/llvm/test/CodeGen/X86/vector-tzcnt-128.ll
+++ b/llvm/test/CodeGen/X86/vector-tzcnt-128.ll
@@ -134,7 +134,7 @@
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -365,7 +365,7 @@
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -646,7 +646,7 @@
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -917,7 +917,7 @@
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -1134,7 +1134,7 @@
; AVX512VPOPCNTDQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -1159,7 +1159,7 @@
; BITALG_NOVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
@@ -1330,7 +1330,7 @@
; AVX512VPOPCNTDQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -1355,7 +1355,7 @@
; BITALG_NOVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
@@ -1531,7 +1531,7 @@
; BITALG_NOVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
@@ -1703,7 +1703,7 @@
; BITALG_NOVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-tzcnt-256.ll b/llvm/test/CodeGen/X86/vector-tzcnt-256.ll
index e754760..775a7a3 100644
--- a/llvm/test/CodeGen/X86/vector-tzcnt-256.ll
+++ b/llvm/test/CodeGen/X86/vector-tzcnt-256.ll
@@ -104,7 +104,7 @@
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; AVX512VPOPCNTDQVL-LABEL: testv4i64:
@@ -250,7 +250,7 @@
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; AVX512VPOPCNTDQVL-LABEL: testv4i64u:
@@ -432,7 +432,7 @@
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; AVX512VPOPCNTDQVL-LABEL: testv8i32:
@@ -603,7 +603,7 @@
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512VPOPCNTDQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; AVX512VPOPCNTDQVL-LABEL: testv8i32u:
@@ -812,7 +812,7 @@
; BITALG_NOVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; BITALG_NOVLX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv16i16:
@@ -975,7 +975,7 @@
; BITALG_NOVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; BITALG_NOVLX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv16i16u:
@@ -1133,7 +1133,7 @@
; BITALG_NOVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; BITALG_NOVLX-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv32i8:
@@ -1288,7 +1288,7 @@
; BITALG_NOVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; BITALG_NOVLX-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; BITALG_NOVLX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv32i8u:
diff --git a/llvm/test/CodeGen/X86/verifier-phi-fail0.mir b/llvm/test/CodeGen/X86/verifier-phi-fail0.mir
index c17b0da..bb707e4 100644
--- a/llvm/test/CodeGen/X86/verifier-phi-fail0.mir
+++ b/llvm/test/CodeGen/X86/verifier-phi-fail0.mir
@@ -15,7 +15,7 @@
tracksRegLiveness: true
body: |
bb.0:
- JE_1 %bb.1, implicit undef %eflags
+ JE_1 %bb.1, implicit undef $eflags
JMP_1 %bb.2
bb.1:
diff --git a/llvm/test/CodeGen/X86/verifier-phi.mir b/llvm/test/CodeGen/X86/verifier-phi.mir
index 78060dc..81a4cb0 100644
--- a/llvm/test/CodeGen/X86/verifier-phi.mir
+++ b/llvm/test/CodeGen/X86/verifier-phi.mir
@@ -7,7 +7,7 @@
tracksRegLiveness: true
body: |
bb.0:
- JE_1 %bb.1, implicit undef %eflags
+ JE_1 %bb.1, implicit undef $eflags
JMP_1 %bb.2
bb.1:
@@ -23,7 +23,7 @@
body: |
bb.0:
%0 : gr32 = IMPLICIT_DEF
- JE_1 %bb.1, implicit undef %eflags
+ JE_1 %bb.1, implicit undef $eflags
JMP_1 %bb.2
bb.1:
diff --git a/llvm/test/CodeGen/X86/virtual-registers-cleared-in-machine-functions-liveins.ll b/llvm/test/CodeGen/X86/virtual-registers-cleared-in-machine-functions-liveins.ll
index c1d2425..549c964 100644
--- a/llvm/test/CodeGen/X86/virtual-registers-cleared-in-machine-functions-liveins.ll
+++ b/llvm/test/CodeGen/X86/virtual-registers-cleared-in-machine-functions-liveins.ll
@@ -11,9 +11,9 @@
}
; PRE-RA: liveins:
-; PRE-RA-NEXT: - { reg: '%edi', virtual-reg: '%0' }
-; PRE-RA-NEXT: - { reg: '%esi', virtual-reg: '%1' }
+; PRE-RA-NEXT: - { reg: '$edi', virtual-reg: '%0' }
+; PRE-RA-NEXT: - { reg: '$esi', virtual-reg: '%1' }
; POST-RA: liveins:
-; POST-RA-NEXT: - { reg: '%edi', virtual-reg: '' }
-; POST-RA-NEXT: - { reg: '%esi', virtual-reg: '' }
+; POST-RA-NEXT: - { reg: '$edi', virtual-reg: '' }
+; POST-RA-NEXT: - { reg: '$esi', virtual-reg: '' }
diff --git a/llvm/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll b/llvm/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
index 004dcfb..a80ed2f 100644
--- a/llvm/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
@@ -8,7 +8,7 @@
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshufbitqmb %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.vpshufbitqmb.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
ret i16 %res
diff --git a/llvm/test/CodeGen/X86/vselect-pcmp.ll b/llvm/test/CodeGen/X86/vselect-pcmp.ll
index c473885..3b41981 100644
--- a/llvm/test/CodeGen/X86/vselect-pcmp.ll
+++ b/llvm/test/CodeGen/X86/vselect-pcmp.ll
@@ -50,13 +50,13 @@
;
; AVX512F-LABEL: signbit_sel_v4i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
-; AVX512F-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpgtd %zmm2, %zmm3, %k1
; AVX512F-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -79,13 +79,13 @@
;
; AVX512F-LABEL: signbit_sel_v2i64:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
-; AVX512F-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpgtq %zmm2, %zmm3, %k1
; AVX512F-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -108,13 +108,13 @@
;
; AVX512F-LABEL: signbit_sel_v4f32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
-; AVX512F-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpgtd %zmm2, %zmm3, %k1
; AVX512F-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -137,13 +137,13 @@
;
; AVX512F-LABEL: signbit_sel_v2f64:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
-; AVX512F-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpgtq %zmm2, %zmm3, %k1
; AVX512F-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -230,13 +230,13 @@
;
; AVX512F-LABEL: signbit_sel_v8i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpgtd %zmm2, %zmm3, %k1
; AVX512F-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: signbit_sel_v8i32:
@@ -258,13 +258,13 @@
;
; AVX512F-LABEL: signbit_sel_v4i64:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpgtq %zmm2, %zmm3, %k1
; AVX512F-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: signbit_sel_v4i64:
@@ -286,13 +286,13 @@
;
; AVX512F-LABEL: signbit_sel_v4f64:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpgtq %zmm2, %zmm3, %k1
; AVX512F-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: signbit_sel_v4f64:
@@ -326,13 +326,13 @@
;
; AVX512F-LABEL: signbit_sel_v4f64_small_mask:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
-; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpgtd %zmm2, %zmm3, %k1
; AVX512F-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: signbit_sel_v4f64_small_mask:
@@ -380,12 +380,12 @@
;
; AVX512F-LABEL: signbit_sel_v4f32_fcmp:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vcmpltps %zmm2, %zmm0, %k1
; AVX512F-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/widen_bitops-0.ll b/llvm/test/CodeGen/X86/widen_bitops-0.ll
index f939396..5312737 100644
--- a/llvm/test/CodeGen/X86/widen_bitops-0.ll
+++ b/llvm/test/CodeGen/X86/widen_bitops-0.ll
@@ -141,9 +141,9 @@
; X32-SSE-NEXT: pextrb $0, %xmm1, %eax
; X32-SSE-NEXT: pextrb $4, %xmm1, %edx
; X32-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X32-SSE-NEXT: # kill: def %al killed %al killed %eax
-; X32-SSE-NEXT: # kill: def %dl killed %dl killed %edx
-; X32-SSE-NEXT: # kill: def %cl killed %cl killed %ecx
+; X32-SSE-NEXT: # kill: def $al killed $al killed $eax
+; X32-SSE-NEXT: # kill: def $dl killed $dl killed $edx
+; X32-SSE-NEXT: # kill: def $cl killed $cl killed $ecx
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: and_v3i8_as_i24:
@@ -158,9 +158,9 @@
; X64-SSE-NEXT: pextrb $0, %xmm1, %eax
; X64-SSE-NEXT: pextrb $4, %xmm1, %edx
; X64-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X64-SSE-NEXT: # kill: def %al killed %al killed %eax
-; X64-SSE-NEXT: # kill: def %dl killed %dl killed %edx
-; X64-SSE-NEXT: # kill: def %cl killed %cl killed %ecx
+; X64-SSE-NEXT: # kill: def $al killed $al killed $eax
+; X64-SSE-NEXT: # kill: def $dl killed $dl killed $edx
+; X64-SSE-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-SSE-NEXT: retq
%1 = bitcast <3 x i8> %a to i24
%2 = bitcast <3 x i8> %b to i24
@@ -182,9 +182,9 @@
; X32-SSE-NEXT: pextrb $0, %xmm1, %eax
; X32-SSE-NEXT: pextrb $4, %xmm1, %edx
; X32-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X32-SSE-NEXT: # kill: def %al killed %al killed %eax
-; X32-SSE-NEXT: # kill: def %dl killed %dl killed %edx
-; X32-SSE-NEXT: # kill: def %cl killed %cl killed %ecx
+; X32-SSE-NEXT: # kill: def $al killed $al killed $eax
+; X32-SSE-NEXT: # kill: def $dl killed $dl killed $edx
+; X32-SSE-NEXT: # kill: def $cl killed $cl killed $ecx
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: xor_v3i8_as_i24:
@@ -199,9 +199,9 @@
; X64-SSE-NEXT: pextrb $0, %xmm1, %eax
; X64-SSE-NEXT: pextrb $4, %xmm1, %edx
; X64-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X64-SSE-NEXT: # kill: def %al killed %al killed %eax
-; X64-SSE-NEXT: # kill: def %dl killed %dl killed %edx
-; X64-SSE-NEXT: # kill: def %cl killed %cl killed %ecx
+; X64-SSE-NEXT: # kill: def $al killed $al killed $eax
+; X64-SSE-NEXT: # kill: def $dl killed $dl killed $edx
+; X64-SSE-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-SSE-NEXT: retq
%1 = bitcast <3 x i8> %a to i24
%2 = bitcast <3 x i8> %b to i24
@@ -223,9 +223,9 @@
; X32-SSE-NEXT: pextrb $0, %xmm1, %eax
; X32-SSE-NEXT: pextrb $4, %xmm1, %edx
; X32-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X32-SSE-NEXT: # kill: def %al killed %al killed %eax
-; X32-SSE-NEXT: # kill: def %dl killed %dl killed %edx
-; X32-SSE-NEXT: # kill: def %cl killed %cl killed %ecx
+; X32-SSE-NEXT: # kill: def $al killed $al killed $eax
+; X32-SSE-NEXT: # kill: def $dl killed $dl killed $edx
+; X32-SSE-NEXT: # kill: def $cl killed $cl killed $ecx
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: or_v3i8_as_i24:
@@ -240,9 +240,9 @@
; X64-SSE-NEXT: pextrb $0, %xmm1, %eax
; X64-SSE-NEXT: pextrb $4, %xmm1, %edx
; X64-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X64-SSE-NEXT: # kill: def %al killed %al killed %eax
-; X64-SSE-NEXT: # kill: def %dl killed %dl killed %edx
-; X64-SSE-NEXT: # kill: def %cl killed %cl killed %ecx
+; X64-SSE-NEXT: # kill: def $al killed $al killed $eax
+; X64-SSE-NEXT: # kill: def $dl killed $dl killed $edx
+; X64-SSE-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-SSE-NEXT: retq
%1 = bitcast <3 x i8> %a to i24
%2 = bitcast <3 x i8> %b to i24
diff --git a/llvm/test/CodeGen/X86/x86-64-baseptr.ll b/llvm/test/CodeGen/X86/x86-64-baseptr.ll
index 6fbcd3b..2c31a7a 100644
--- a/llvm/test/CodeGen/X86/x86-64-baseptr.ll
+++ b/llvm/test/CodeGen/X86/x86-64-baseptr.ll
@@ -43,7 +43,7 @@
; X32ABI-NEXT: subl $32, %esp
; X32ABI-NEXT: movl %esp, %ebx
; X32ABI-NEXT: callq helper
-; X32ABI-NEXT: # kill: def %eax killed %eax def %rax
+; X32ABI-NEXT: # kill: def $eax killed $eax def $rax
; X32ABI-NEXT: movl %esp, %ecx
; X32ABI-NEXT: leal 31(,%rax,4), %eax
; X32ABI-NEXT: andl $-32, %eax
diff --git a/llvm/test/CodeGen/X86/x86-interleaved-access.ll b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
index c1ea412..ded8025 100644
--- a/llvm/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
@@ -643,7 +643,7 @@
; AVX512-NEXT: vpcmpeqb %zmm0, %zmm3, %k1
; AVX512-NEXT: kxnorw %k1, %k0, %k0
; AVX512-NEXT: vpmovm2b %k0, %zmm0
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%wide.vec = load <64 x i8>, <64 x i8>* %ptr
@@ -946,7 +946,7 @@
; AVX512-NEXT: vpcmpeqb %zmm0, %zmm2, %k1
; AVX512-NEXT: kxnord %k1, %k0, %k0
; AVX512-NEXT: vpmovm2b %k0, %zmm0
-; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
%wide.vec = load <128 x i8>, <128 x i8>* %ptr
%v1 = shufflevector <128 x i8> %wide.vec, <128 x i8> undef, <32 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60, i32 64, i32 68, i32 72, i32 76, i32 80, i32 84, i32 88, i32 92, i32 96, i32 100, i32 104, i32 108, i32 112, i32 116, i32 120, i32 124>
diff --git a/llvm/test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll b/llvm/test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll
index 543d4f4..a0d0a86 100644
--- a/llvm/test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll
+++ b/llvm/test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll
@@ -8,7 +8,7 @@
define <4 x i64> @broadcast128(<2 x i64> %src) {
; CHECK-LABEL: broadcast128:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/xor-combine-debugloc.ll b/llvm/test/CodeGen/X86/xor-combine-debugloc.ll
index 4491d14..8b7e9d4 100644
--- a/llvm/test/CodeGen/X86/xor-combine-debugloc.ll
+++ b/llvm/test/CodeGen/X86/xor-combine-debugloc.ll
@@ -1,16 +1,16 @@
; RUN: llc -stop-after=expand-isel-pseudos < %s | FileCheck %s
;
; Make sure that when the entry block of IR below is lowered, an instruction
-; that implictly defines %eflags has a same debug location with the icmp
+; that implicitly defines $eflags has the same debug location as the icmp
; instruction, and the branch instructions have the same debug location as the
; br instruction.
;
; CHECK: [[DLOC1:![0-9]+]] = !DILocation(line: 5, column: 9, scope: !{{[0-9]+}})
; CHECK: [[DLOC2:![0-9]+]] = !DILocation(line: 5, column: 7, scope: !{{[0-9]+}})
-; CHECK-DAG: [[VREG1:%[^ ]+]]:gr32 = COPY %esi
-; CHECK-DAG: [[VREG2:%[^ ]+]]:gr32 = COPY %edi
-; CHECK: SUB32rr [[VREG2]], [[VREG1]], implicit-def %eflags, debug-location [[DLOC1]]
-; CHECK-NEXT: JE_1{{.*}} implicit %eflags, debug-location [[DLOC2]]
+; CHECK-DAG: [[VREG1:%[^ ]+]]:gr32 = COPY $esi
+; CHECK-DAG: [[VREG2:%[^ ]+]]:gr32 = COPY $edi
+; CHECK: SUB32rr [[VREG2]], [[VREG1]], implicit-def $eflags, debug-location [[DLOC1]]
+; CHECK-NEXT: JE_1{{.*}} implicit $eflags, debug-location [[DLOC2]]
; CHECK-NEXT: JMP_1{{.*}} debug-location [[DLOC2]]
target triple = "x86_64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/X86/xray-empty-firstmbb.mir b/llvm/test/CodeGen/X86/xray-empty-firstmbb.mir
index 746ee15..5949a4a 100644
--- a/llvm/test/CodeGen/X86/xray-empty-firstmbb.mir
+++ b/llvm/test/CodeGen/X86/xray-empty-firstmbb.mir
@@ -15,9 +15,9 @@
name: foo
tracksRegLiveness: true
liveins:
- - { reg: '%edi'}
+ - { reg: '$edi'}
body: |
bb.0.entry:
- liveins: %edi
+ liveins: $edi
; CHECK-NOT: PATCHABLE_FUNCTION_ENTER
...
diff --git a/llvm/test/CodeGen/X86/xray-empty-function.mir b/llvm/test/CodeGen/X86/xray-empty-function.mir
index 3229c93..178771a 100644
--- a/llvm/test/CodeGen/X86/xray-empty-function.mir
+++ b/llvm/test/CodeGen/X86/xray-empty-function.mir
@@ -5,7 +5,7 @@
name: empty
tracksRegLiveness: true
liveins:
- - { reg: '%edi'}
+ - { reg: '$edi'}
body: |
bb.0:
; CHECK-NOT: PATCHABLE_FUNCTION_ENTER
diff --git a/llvm/test/CodeGen/X86/xray-multiplerets-in-blocks.mir b/llvm/test/CodeGen/X86/xray-multiplerets-in-blocks.mir
index 282148f..8cb15a4 100644
--- a/llvm/test/CodeGen/X86/xray-multiplerets-in-blocks.mir
+++ b/llvm/test/CodeGen/X86/xray-multiplerets-in-blocks.mir
@@ -16,10 +16,10 @@
name: foo
tracksRegLiveness: true
liveins:
- - { reg: '%edi'}
+ - { reg: '$edi'}
body: |
bb.0:
- liveins: %edi
+ liveins: $edi
; CHECK: PATCHABLE_FUNCTION_ENTER
RETQ
; CHECK-NEXT: PATCHABLE_RET
diff --git a/llvm/test/CodeGen/X86/zext-demanded.ll b/llvm/test/CodeGen/X86/zext-demanded.ll
index 117f8dc..db29272 100644
--- a/llvm/test/CodeGen/X86/zext-demanded.ll
+++ b/llvm/test/CodeGen/X86/zext-demanded.ll
@@ -10,7 +10,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: movzwl %di, %eax
; CHECK-NEXT: shrl %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%y = lshr i16 %x, 1
ret i16 %y
@@ -43,7 +43,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: shrl %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%y = and i16 %x, 255
%z = lshr i16 %y, 1
@@ -55,7 +55,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: movzwl %di, %eax
; CHECK-NEXT: shrl $9, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%y = lshr i16 %x, 9
ret i16 %y
@@ -76,7 +76,7 @@
define i32 @test7(i32 %x) {
; CHECK-LABEL: test7:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: andl $65534, %edi # imm = 0xFFFE
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: retq