| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2 |
| |
| ; |
| ; testz(~X,Y) -> testc(X,Y) |
| ; |
| |
define i32 @testpdz_128_invert0(<2 x double> %c, <2 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdz_128_invert0:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: vtestpd %xmm1, %xmm0
; CHECK-NEXT: cmovael %esi, %eax
; CHECK-NEXT: retq
; testz(~C, D) folds to testc(C, D): the checks show no vector NOT being
; materialized, and the select reads CF (cmovae = "move if CF==0") instead
; of the ZF condition that testz would produce.
  %t0 = bitcast <2 x double> %c to <2 x i64>
  %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
  %t2 = bitcast <2 x i64> %t1 to <2 x double>
  %t3 = call i32 @llvm.x86.avx.vtestz.pd(<2 x double> %t2, <2 x double> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}
| |
define i32 @testpdz_256_invert0(<4 x double> %c, <4 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdz_256_invert0:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: vtestpd %ymm1, %ymm0
; CHECK-NEXT: cmovael %esi, %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
; 256-bit variant of the testz(~C, D) -> testc(C, D) fold: no vector NOT is
; emitted and the select keys off CF (cmovae). vzeroupper is expected before
; returning from 256-bit AVX code.
  %t0 = bitcast <4 x double> %c to <4 x i64>
  %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %t2 = bitcast <4 x i64> %t1 to <4 x double>
  %t3 = call i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> %t2, <4 x double> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}
| |
| ; |
| ; testz(X,~Y) -> testc(Y,X) |
| ; |
| |
define i32 @testpdz_128_invert1(<2 x double> %c, <2 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdz_128_invert1:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: vtestpd %xmm0, %xmm1
; CHECK-NEXT: cmovael %esi, %eax
; CHECK-NEXT: retq
; testz(C, ~D) folds to testc(D, C): note the vtestpd operands are swapped
; (%xmm0, %xmm1 here vs. %xmm1, %xmm0 in the invert0 test) and the NOT on %d
; is gone; the select reads CF (cmovae).
  %t0 = bitcast <2 x double> %d to <2 x i64>
  %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
  %t2 = bitcast <2 x i64> %t1 to <2 x double>
  %t3 = call i32 @llvm.x86.avx.vtestz.pd(<2 x double> %c, <2 x double> %t2)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}
| |
define i32 @testpdz_256_invert1(<4 x double> %c, <4 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdz_256_invert1:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: vtestpd %ymm0, %ymm1
; CHECK-NEXT: cmovael %esi, %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
; 256-bit variant of testz(C, ~D) -> testc(D, C): operands swapped in the
; vtestpd, no vector NOT emitted, select keys off CF (cmovae).
  %t0 = bitcast <4 x double> %d to <4 x i64>
  %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %t2 = bitcast <4 x i64> %t1 to <4 x double>
  %t3 = call i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> %c, <4 x double> %t2)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}
| |
| ; |
| ; testc(~X,Y) -> testz(X,Y) |
| ; |
| |
define i32 @testpdc_128_invert0(<2 x double> %c, <2 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdc_128_invert0:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: vtestpd %xmm1, %xmm0
; CHECK-NEXT: cmovnel %esi, %eax
; CHECK-NEXT: retq
; The dual of the testz fold: testc(~C, D) folds to testz(C, D). The NOT
; disappears and the select now reads ZF (cmovne = "move if ZF==0") rather
; than the CF condition testc would produce.
  %t0 = bitcast <2 x double> %c to <2 x i64>
  %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
  %t2 = bitcast <2 x i64> %t1 to <2 x double>
  %t3 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %t2, <2 x double> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}
| |
define i32 @testpdc_256_invert0(<4 x double> %c, <4 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdc_256_invert0:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: vtestpd %ymm1, %ymm0
; CHECK-NEXT: cmovnel %esi, %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
; 256-bit variant of testc(~C, D) -> testz(C, D): no vector NOT emitted and
; the select keys off ZF (cmovne).
  %t0 = bitcast <4 x double> %c to <4 x i64>
  %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %t2 = bitcast <4 x i64> %t1 to <4 x double>
  %t3 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %t2, <4 x double> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}
| |
| ; |
| ; testnzc(~X,Y) -> testnzc(X,Y) |
| ; |
| |
define i32 @testpdnzc_128_invert0(<2 x double> %c, <2 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdnzc_128_invert0:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: vtestpd %xmm1, %xmm0
; CHECK-NEXT: cmovbel %esi, %eax
; CHECK-NEXT: retq
; testnzc is invariant under inversion of an operand (inverting X swaps the
; roles of ZF and CF, and nzc = !ZF && !CF is symmetric in them), so
; testnzc(~C, D) -> testnzc(C, D): the NOT folds away and the select reads
; the combined condition (cmovbe = "move if CF==1 || ZF==1").
  %t0 = bitcast <2 x double> %c to <2 x i64>
  %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
  %t2 = bitcast <2 x i64> %t1 to <2 x double>
  %t3 = call i32 @llvm.x86.avx.vtestnzc.pd(<2 x double> %t2, <2 x double> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}
| |
define i32 @testpdnzc_256_invert0(<4 x double> %c, <4 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdnzc_256_invert0:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: vtestpd %ymm1, %ymm0
; CHECK-NEXT: cmovbel %esi, %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
; 256-bit variant of testnzc(~C, D) -> testnzc(C, D): the NOT folds away and
; the select keys off CF/ZF combined (cmovbe).
  %t0 = bitcast <4 x double> %c to <4 x i64>
  %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %t2 = bitcast <4 x i64> %t1 to <4 x double>
  %t3 = call i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double> %t2, <4 x double> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}
| |
| ; |
| ; SimplifyDemandedBits - only the sign bit is required |
| ; |
| |
define i32 @testpdc_128_signbit(<2 x double> %c, <2 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdc_128_signbit:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: vtestpd %xmm1, %xmm0
; CHECK-NEXT: cmovael %esi, %eax
; CHECK-NEXT: retq
; vtest only inspects element sign bits, and ashr-by-63 broadcasts each sign
; bit across its lane, so SimplifyDemandedBits drops the shift entirely:
; the checks show vtestpd operating on the original %c with no shift emitted.
  %t0 = bitcast <2 x double> %c to <2 x i64>
  %t1 = ashr <2 x i64> %t0, <i64 63, i64 63>
  %t2 = bitcast <2 x i64> %t1 to <2 x double>
  %t3 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %t2, <2 x double> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}
| |
define i32 @testpdz_256_signbit(<4 x double> %c, <4 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdz_256_signbit:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: vtestpd %ymm1, %ymm0
; CHECK-NEXT: cmovnel %esi, %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
; Sign-mask construction via (icmp sgt 0, x) + sext produces all-ones lanes
; exactly where x is negative, i.e. it only forwards the sign bits that vtest
; inspects anyway, so the compare/sext pair is elided: the checks show vtestpd
; reading the original %c directly, with the select on ZF (cmovne).
  %t0 = bitcast <4 x double> %c to <4 x i64>
  %t1 = icmp sgt <4 x i64> zeroinitializer, %t0
  %t2 = sext <4 x i1> %t1 to <4 x i64>
  %t3 = bitcast <4 x i64> %t2 to <4 x double>
  %t4 = call i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> %t3, <4 x double> %d)
  %t5 = icmp ne i32 %t4, 0
  %t6 = select i1 %t5, i32 %a, i32 %b
  ret i32 %t6
}
| |
define i32 @testpdnzc_256_signbit_multiuse(<4 x double> %c, i32 %a, i32 %b) {
; CHECK-LABEL: testpdnzc_256_signbit_multiuse:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: vtestpd %ymm0, %ymm0
; CHECK-NEXT: cmovnel %esi, %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
; Same sign-mask elision as above, but the mask feeds BOTH vtest operands:
; the fold still applies, leaving vtestpd %ymm0, %ymm0 on the original %c.
; (Despite the "nzc" in the name, the IR calls the testz intrinsic, so the
; select keys off ZF via cmovne.)
  %t0 = bitcast <4 x double> %c to <4 x i64>
  %t1 = icmp sgt <4 x i64> zeroinitializer, %t0
  %t2 = sext <4 x i1> %t1 to <4 x i64>
  %t3 = bitcast <4 x i64> %t2 to <4 x double>
  %t4 = call i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> %t3, <4 x double> %t3)
  %t5 = icmp ne i32 %t4, 0
  %t6 = select i1 %t5, i32 %a, i32 %b
  ret i32 %t6
}
| |
define i1 @PR62171(<4 x double> %a0, <4 x double> %a1) {
; CHECK-LABEL: PR62171:
; CHECK: # %bb.0:
; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vtestpd %ymm0, %ymm0
; CHECK-NEXT: sete %al
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
; Regression test: an "any lane equal?" reduction written as sext + halve/or
; + byte-signbit movmsk-style compare should collapse into a single
; vtestpd of the vcmpeqpd result against itself, with the i1 produced by
; sete from ZF — no 128-bit extract/or or scalar mask materialization.
  %cmp = fcmp oeq <4 x double> %a0, %a1
  %sext = sext <4 x i1> %cmp to <4 x i64>
  %extract = shufflevector <4 x i64> %sext, <4 x i64> poison, <2 x i32> <i32 0, i32 1>
  %extract1 = shufflevector <4 x i64> %sext, <4 x i64> poison, <2 x i32> <i32 2, i32 3>
  %or = or <2 x i64> %extract, %extract1
  %or1 = bitcast <2 x i64> %or to <16 x i8>
  %msk = icmp slt <16 x i8> %or1, zeroinitializer
  %msk1 = bitcast <16 x i1> %msk to i16
  %not = icmp eq i16 %msk1, 0
  ret i1 %not
}
| |
define void @combine_testp_v4f64(<4 x i64> %x){
; AVX-LABEL: combine_testp_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
; AVX-NEXT: vtestpd %ymm1, %ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
; AVX2-LABEL: combine_testp_v4f64:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vtestpd %ymm1, %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
; entry:
entry:
  ; testz(~X, ~X) with both branch destinations just returning: the NOT on %x
  ; folds away, leaving a vtestpd of %x against an all-ones vector (built with
  ; vcmptrueps on AVX1 vs. vpcmpeqd on AVX2 — hence the split prefixes).
  ; NOTE(review): the flag consumer (cmov/jcc) is absent from the checks,
  ; presumably because both successors are `ret void` — confirm against the
  ; original PR if regenerating.
  %xor.i.i.i.i.i.i.i.i.i = xor <4 x i64> %x, <i64 -1, i64 -1, i64 -1, i64 -1>
  %.cast.i.i.i.i.i.i = bitcast <4 x i64> %xor.i.i.i.i.i.i.i.i.i to <4 x double>
  %0 = call i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> %.cast.i.i.i.i.i.i, <4 x double> %.cast.i.i.i.i.i.i)
  %cmp.i.not.i.i.i.i.i.i = icmp eq i32 %0, 0
  br i1 %cmp.i.not.i.i.i.i.i.i, label %if.end3.i.i.i.i.i.i, label %end

if.end3.i.i.i.i.i.i: ; preds = %entry
  ret void

end: ; preds = %entry
  ret void
}
| |
| declare i32 @llvm.x86.avx.vtestz.pd(<2 x double>, <2 x double>) nounwind readnone |
| declare i32 @llvm.x86.avx.vtestc.pd(<2 x double>, <2 x double>) nounwind readnone |
| declare i32 @llvm.x86.avx.vtestnzc.pd(<2 x double>, <2 x double>) nounwind readnone |
| |
| declare i32 @llvm.x86.avx.vtestz.pd.256(<4 x double>, <4 x double>) nounwind readnone |
| declare i32 @llvm.x86.avx.vtestc.pd.256(<4 x double>, <4 x double>) nounwind readnone |
| declare i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double>, <4 x double>) nounwind readnone |