; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X64
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=X64
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X64
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl,+avx512bw,+avx512dq | FileCheck %s --check-prefixes=X64
; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X86
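; All 64-bit runs share the X64 check prefix: the expected asm must be
; identical from SSE2 through AVX512, since every variant should lower to the
; same scalar code. Only the i686 run uses the X86 prefix.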
declare i1 @llvm.vector.reduce.and.v2i1(<2 x i1>)
declare i1 @llvm.vector.reduce.and.v4i1(<4 x i1>)
declare i1 @llvm.vector.reduce.and.v8i1(<8 x i1>)
; All four versions below are semantically equivalent and should produce the
; same asm as the scalar version.
; Version 1: elementwise icmp eq reduced to a single bit with
; llvm.vector.reduce.and.
define i1 @intrinsic_v2i8(ptr align 1 %arg, ptr align 1 %arg1) {
; X64-LABEL: intrinsic_v2i8:
; X64: # %bb.0: # %bb
; X64-NEXT: movzwl (%rsi), %eax
; X64-NEXT: cmpw (%rdi), %ax
; X64-NEXT: sete %al
; X64-NEXT: retq
;
; X86-LABEL: intrinsic_v2i8:
; X86: # %bb.0: # %bb
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl (%ecx), %ecx
; X86-NEXT: cmpw (%eax), %cx
; X86-NEXT: sete %al
; X86-NEXT: retl
bb:
%lhs = load <2 x i8>, ptr %arg1, align 1
%rhs = load <2 x i8>, ptr %arg, align 1
%cmp = icmp eq <2 x i8> %lhs, %rhs
%all_eq = call i1 @llvm.vector.reduce.and.v2i1(<2 x i1> %cmp)
ret i1 %all_eq
}
define i1 @intrinsic_v4i8(ptr align 1 %arg, ptr align 1 %arg1) {
; X64-LABEL: intrinsic_v4i8:
; X64: # %bb.0: # %bb
; X64-NEXT: movl (%rsi), %eax
; X64-NEXT: cmpl (%rdi), %eax
; X64-NEXT: sete %al
; X64-NEXT: retq
;
; X86-LABEL: intrinsic_v4i8:
; X86: # %bb.0: # %bb
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %ecx
; X86-NEXT: cmpl (%eax), %ecx
; X86-NEXT: sete %al
; X86-NEXT: retl
bb:
%lhs = load <4 x i8>, ptr %arg1, align 1
%rhs = load <4 x i8>, ptr %arg, align 1
%cmp = icmp eq <4 x i8> %lhs, %rhs
%all_eq = call i1 @llvm.vector.reduce.and.v4i1(<4 x i1> %cmp)
ret i1 %all_eq
}
define i1 @intrinsic_v8i8(ptr align 1 %arg, ptr align 1 %arg1) {
; X64-LABEL: intrinsic_v8i8:
; X64: # %bb.0: # %bb
; X64-NEXT: movq (%rsi), %rax
; X64-NEXT: cmpq (%rdi), %rax
; X64-NEXT: sete %al
; X64-NEXT: retq
;
; X86-LABEL: intrinsic_v8i8:
; X86: # %bb.0: # %bb
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %edx
; X86-NEXT: movl 4(%ecx), %ecx
; X86-NEXT: xorl 4(%eax), %ecx
; X86-NEXT: xorl (%eax), %edx
; X86-NEXT: orl %ecx, %edx
; X86-NEXT: sete %al
; X86-NEXT: retl
bb:
%lhs = load <8 x i8>, ptr %arg1, align 1
%rhs = load <8 x i8>, ptr %arg, align 1
%cmp = icmp eq <8 x i8> %lhs, %rhs
%all_eq = call i1 @llvm.vector.reduce.and.v8i1(<8 x i1> %cmp)
ret i1 %all_eq
}
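; Version 2: bitcast the <N x i1> compare result to an iN mask and test it
; against zero.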
define i1 @vector_version_v2i8(ptr align 1 %arg, ptr align 1 %arg1) {
; X64-LABEL: vector_version_v2i8:
; X64: # %bb.0: # %bb
; X64-NEXT: movzwl (%rsi), %eax
; X64-NEXT: cmpw (%rdi), %ax
; X64-NEXT: sete %al
; X64-NEXT: retq
;
; X86-LABEL: vector_version_v2i8:
; X86: # %bb.0: # %bb
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl (%ecx), %ecx
; X86-NEXT: cmpw (%eax), %cx
; X86-NEXT: sete %al
; X86-NEXT: retl
bb:
%lhs = load <2 x i8>, ptr %arg1, align 1
%rhs = load <2 x i8>, ptr %arg, align 1
%any_ne = icmp ne <2 x i8> %lhs, %rhs
%any_ne_scalar = bitcast <2 x i1> %any_ne to i2
%all_eq = icmp eq i2 %any_ne_scalar, 0
ret i1 %all_eq
}
define i1 @vector_version_v4i8(ptr align 1 %arg, ptr align 1 %arg1) {
; X64-LABEL: vector_version_v4i8:
; X64: # %bb.0: # %bb
; X64-NEXT: movl (%rsi), %eax
; X64-NEXT: cmpl (%rdi), %eax
; X64-NEXT: sete %al
; X64-NEXT: retq
;
; X86-LABEL: vector_version_v4i8:
; X86: # %bb.0: # %bb
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %ecx
; X86-NEXT: cmpl (%eax), %ecx
; X86-NEXT: sete %al
; X86-NEXT: retl
bb:
%lhs = load <4 x i8>, ptr %arg1, align 1
%rhs = load <4 x i8>, ptr %arg, align 1
%any_ne = icmp ne <4 x i8> %lhs, %rhs
%any_ne_scalar = bitcast <4 x i1> %any_ne to i4
%all_eq = icmp eq i4 %any_ne_scalar, 0
ret i1 %all_eq
}
define i1 @vector_version_v8i8(ptr align 1 %arg, ptr align 1 %arg1) {
; X64-LABEL: vector_version_v8i8:
; X64: # %bb.0: # %bb
; X64-NEXT: movq (%rsi), %rax
; X64-NEXT: cmpq (%rdi), %rax
; X64-NEXT: sete %al
; X64-NEXT: retq
;
; X86-LABEL: vector_version_v8i8:
; X86: # %bb.0: # %bb
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %edx
; X86-NEXT: movl 4(%ecx), %ecx
; X86-NEXT: xorl 4(%eax), %ecx
; X86-NEXT: xorl (%eax), %edx
; X86-NEXT: orl %ecx, %edx
; X86-NEXT: sete %al
; X86-NEXT: retl
bb:
%lhs = load <8 x i8>, ptr %arg1, align 1
%rhs = load <8 x i8>, ptr %arg, align 1
%any_ne = icmp ne <8 x i8> %lhs, %rhs
%any_ne_scalar = bitcast <8 x i1> %any_ne to i8
%all_eq = icmp eq i8 %any_ne_scalar, 0
ret i1 %all_eq
}
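; Version 3: bitcast the loaded vectors to scalar integers and compare those
; directly.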
define i1 @mixed_version_v2i8(ptr align 1 %arg, ptr align 1 %arg1) {
; X64-LABEL: mixed_version_v2i8:
; X64: # %bb.0: # %bb
; X64-NEXT: movzwl (%rsi), %eax
; X64-NEXT: cmpw (%rdi), %ax
; X64-NEXT: sete %al
; X64-NEXT: retq
;
; X86-LABEL: mixed_version_v2i8:
; X86: # %bb.0: # %bb
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl (%ecx), %ecx
; X86-NEXT: cmpw (%eax), %cx
; X86-NEXT: sete %al
; X86-NEXT: retl
bb:
%lhs = load <2 x i8>, ptr %arg1, align 1
%rhs = load <2 x i8>, ptr %arg, align 1
%lhs_s = bitcast <2 x i8> %lhs to i16
%rhs_s = bitcast <2 x i8> %rhs to i16
%all_eq = icmp eq i16 %lhs_s, %rhs_s
ret i1 %all_eq
}
define i1 @mixed_version_v4i8(ptr align 1 %arg, ptr align 1 %arg1) {
; X64-LABEL: mixed_version_v4i8:
; X64: # %bb.0: # %bb
; X64-NEXT: movl (%rsi), %eax
; X64-NEXT: cmpl (%rdi), %eax
; X64-NEXT: sete %al
; X64-NEXT: retq
;
; X86-LABEL: mixed_version_v4i8:
; X86: # %bb.0: # %bb
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %ecx
; X86-NEXT: cmpl (%eax), %ecx
; X86-NEXT: sete %al
; X86-NEXT: retl
bb:
%lhs = load <4 x i8>, ptr %arg1, align 1
%rhs = load <4 x i8>, ptr %arg, align 1
%lhs_s = bitcast <4 x i8> %lhs to i32
%rhs_s = bitcast <4 x i8> %rhs to i32
%all_eq = icmp eq i32 %lhs_s, %rhs_s
ret i1 %all_eq
}
define i1 @mixed_version_v8i8(ptr align 1 %arg, ptr align 1 %arg1) {
; X64-LABEL: mixed_version_v8i8:
; X64: # %bb.0: # %bb
; X64-NEXT: movq (%rsi), %rax
; X64-NEXT: cmpq (%rdi), %rax
; X64-NEXT: sete %al
; X64-NEXT: retq
;
; X86-LABEL: mixed_version_v8i8:
; X86: # %bb.0: # %bb
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %edx
; X86-NEXT: movl 4(%ecx), %ecx
; X86-NEXT: xorl 4(%eax), %ecx
; X86-NEXT: xorl (%eax), %edx
; X86-NEXT: orl %ecx, %edx
; X86-NEXT: sete %al
; X86-NEXT: retl
bb:
%lhs = load <8 x i8>, ptr %arg1, align 1
%rhs = load <8 x i8>, ptr %arg, align 1
%lhs_s = bitcast <8 x i8> %lhs to i64
%rhs_s = bitcast <8 x i8> %rhs to i64
%all_eq = icmp eq i64 %lhs_s, %rhs_s
ret i1 %all_eq
}
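; Version 4: plain scalar loads and compare; this is the baseline asm the
; other versions are expected to match.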
define i1 @scalar_version_i16(ptr align 1 %arg, ptr align 1 %arg1) {
; X64-LABEL: scalar_version_i16:
; X64: # %bb.0: # %bb
; X64-NEXT: movzwl (%rsi), %eax
; X64-NEXT: cmpw (%rdi), %ax
; X64-NEXT: sete %al
; X64-NEXT: retq
;
; X86-LABEL: scalar_version_i16:
; X86: # %bb.0: # %bb
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl (%ecx), %ecx
; X86-NEXT: cmpw (%eax), %cx
; X86-NEXT: sete %al
; X86-NEXT: retl
bb:
%lhs = load i16, ptr %arg1, align 1
%rhs = load i16, ptr %arg, align 1
%all_eq = icmp eq i16 %lhs, %rhs
ret i1 %all_eq
}
define i1 @scalar_version_i32(ptr align 1 %arg, ptr align 1 %arg1) {
; X64-LABEL: scalar_version_i32:
; X64: # %bb.0: # %bb
; X64-NEXT: movl (%rsi), %eax
; X64-NEXT: cmpl (%rdi), %eax
; X64-NEXT: sete %al
; X64-NEXT: retq
;
; X86-LABEL: scalar_version_i32:
; X86: # %bb.0: # %bb
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %ecx
; X86-NEXT: cmpl (%eax), %ecx
; X86-NEXT: sete %al
; X86-NEXT: retl
bb:
%lhs = load i32, ptr %arg1, align 1
%rhs = load i32, ptr %arg, align 1
%all_eq = icmp eq i32 %lhs, %rhs
ret i1 %all_eq
}
define i1 @scalar_version_i64(ptr align 1 %arg, ptr align 1 %arg1) {
; X64-LABEL: scalar_version_i64:
; X64: # %bb.0: # %bb
; X64-NEXT: movq (%rsi), %rax
; X64-NEXT: cmpq (%rdi), %rax
; X64-NEXT: sete %al
; X64-NEXT: retq
;
; X86-LABEL: scalar_version_i64:
; X86: # %bb.0: # %bb
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %edx
; X86-NEXT: movl 4(%ecx), %ecx
; X86-NEXT: xorl 4(%eax), %ecx
; X86-NEXT: xorl (%eax), %edx
; X86-NEXT: orl %ecx, %edx
; X86-NEXT: sete %al
; X86-NEXT: retl
bb:
%lhs = load i64, ptr %arg1, align 1
%rhs = load i64, ptr %arg, align 1
%all_eq = icmp eq i64 %lhs, %rhs
ret i1 %all_eq
}