blob: 2cec995926e1779bf8ced9aae9205502b1de85c0 [file] [log] [blame]
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -mattr=+neon | FileCheck %s
; Single-lane i1 reduction: no combining needed, just mask bit 0 of the
; incoming argument register.
define i1 @test_redxor_v1i1(<1 x i1> %a) {
; CHECK-LABEL: test_redxor_v1i1:
; CHECK: // %bb.0:
; CHECK-NEXT: and w0, w0, #0x1
; CHECK-NEXT: ret
; Renamed from %or_result: the intrinsic is a XOR reduction, and the
; rest of this file uses %xor_result.
%xor_result = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %a)
ret i1 %xor_result
}
; v2i1 is widened to v2i32: both 32-bit lanes are moved to GPRs, xor'ed,
; and the result truncated back to one bit with an and-mask.
define i1 @test_redxor_v2i1(<2 x i1> %a) {
; CHECK-LABEL: test_redxor_v2i1:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: mov w8, v0.s[1]
; CHECK-NEXT: fmov w9, s0
; CHECK-NEXT: eor w8, w9, w8
; CHECK-NEXT: and w0, w8, #0x1
; CHECK-NEXT: ret
; Renamed from %or_result for consistency with the %xor_result naming
; used throughout this file.
%xor_result = call i1 @llvm.vector.reduce.xor.v2i1(<2 x i1> %a)
ret i1 %xor_result
}
; v4i1 is widened to v4i16: each halfword lane is extracted with umov and
; the four values are folded with a chain of scalar eors, then masked to 1 bit.
define i1 @test_redxor_v4i1(<4 x i1> %a) {
; CHECK-LABEL: test_redxor_v4i1:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: umov w10, v0.h[1]
; CHECK-NEXT: umov w11, v0.h[0]
; CHECK-NEXT: umov w9, v0.h[2]
; CHECK-NEXT: eor w10, w11, w10
; CHECK-NEXT: umov w8, v0.h[3]
; CHECK-NEXT: eor w9, w10, w9
; CHECK-NEXT: eor w8, w9, w8
; CHECK-NEXT: and w0, w8, #0x1
; CHECK-NEXT: ret
; Renamed from %or_result for consistency with the %xor_result naming
; used throughout this file.
%xor_result = call i1 @llvm.vector.reduce.xor.v4i1(<4 x i1> %a)
ret i1 %xor_result
}
; v8i1 is widened to v8i8: all eight byte lanes are extracted to GPRs and
; folded with scalar eors, then the result is masked to 1 bit.
define i1 @test_redxor_v8i1(<8 x i1> %a) {
; CHECK-LABEL: test_redxor_v8i1:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: umov w14, v0.b[1]
; CHECK-NEXT: umov w15, v0.b[0]
; CHECK-NEXT: umov w13, v0.b[2]
; CHECK-NEXT: eor w14, w15, w14
; CHECK-NEXT: umov w12, v0.b[3]
; CHECK-NEXT: eor w13, w14, w13
; CHECK-NEXT: umov w11, v0.b[4]
; CHECK-NEXT: eor w12, w13, w12
; CHECK-NEXT: umov w10, v0.b[5]
; CHECK-NEXT: eor w11, w12, w11
; CHECK-NEXT: umov w9, v0.b[6]
; CHECK-NEXT: eor w10, w11, w10
; CHECK-NEXT: umov w8, v0.b[7]
; CHECK-NEXT: eor w9, w10, w9
; CHECK-NEXT: eor w8, w9, w8
; CHECK-NEXT: and w0, w8, #0x1
; CHECK-NEXT: ret
; Renamed from %or_result for consistency with the %xor_result naming
; used throughout this file.
%xor_result = call i1 @llvm.vector.reduce.xor.v8i1(<8 x i1> %a)
ret i1 %xor_result
}
; v16i1 is widened to v16i8: the 128-bit vector is first halved with
; ext + 64-bit eor, then the remaining eight byte lanes are folded in
; GPRs and the result masked to 1 bit.
define i1 @test_redxor_v16i1(<16 x i1> %a) {
; CHECK-LABEL: test_redxor_v16i1:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b
; CHECK-NEXT: umov w8, v0.b[1]
; CHECK-NEXT: umov w9, v0.b[0]
; CHECK-NEXT: eor w8, w9, w8
; CHECK-NEXT: umov w9, v0.b[2]
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[3]
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[4]
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[5]
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[6]
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[7]
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: and w0, w8, #0x1
; CHECK-NEXT: ret
; Renamed from %or_result for consistency with the %xor_result naming
; used throughout this file.
%xor_result = call i1 @llvm.vector.reduce.xor.v16i1(<16 x i1> %a)
ret i1 %xor_result
}
; Single-lane i8 reduction: no combining needed, the lowering just
; extracts byte lane 0 into the return register.
define i8 @test_redxor_v1i8(<1 x i8> %a) {
; CHECK-LABEL: test_redxor_v1i8:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: umov w0, v0.b[0]
; CHECK-NEXT: ret
%xor_result = call i8 @llvm.vector.reduce.xor.v1i8(<1 x i8> %a)
ret i8 %xor_result
}
; Odd-sized v3i8 is passed as three separate GPR arguments (w0-w2), so the
; reduction lowers to two plain scalar eors with no vector code at all.
define i8 @test_redxor_v3i8(<3 x i8> %a) {
; CHECK-LABEL: test_redxor_v3i8:
; CHECK: // %bb.0:
; CHECK-NEXT: eor w8, w0, w1
; CHECK-NEXT: eor w0, w8, w2
; CHECK-NEXT: ret
%xor_result = call i8 @llvm.vector.reduce.xor.v3i8(<3 x i8> %a)
ret i8 %xor_result
}
; v4i8 is promoted to v4i16: each halfword lane is extracted with umov and
; folded with a chain of scalar eors (same lowering as the v4i16 case below).
define i8 @test_redxor_v4i8(<4 x i8> %a) {
; CHECK-LABEL: test_redxor_v4i8:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: umov w10, v0.h[1]
; CHECK-NEXT: umov w11, v0.h[0]
; CHECK-NEXT: umov w9, v0.h[2]
; CHECK-NEXT: eor w10, w11, w10
; CHECK-NEXT: umov w8, v0.h[3]
; CHECK-NEXT: eor w9, w10, w9
; CHECK-NEXT: eor w0, w9, w8
; CHECK-NEXT: ret
%xor_result = call i8 @llvm.vector.reduce.xor.v4i8(<4 x i8> %a)
ret i8 %xor_result
}
; v8i8: all eight byte lanes are moved to GPRs with umov and folded with a
; chain of scalar eors; the final eor writes the return register directly.
define i8 @test_redxor_v8i8(<8 x i8> %a) {
; CHECK-LABEL: test_redxor_v8i8:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: umov w14, v0.b[1]
; CHECK-NEXT: umov w15, v0.b[0]
; CHECK-NEXT: umov w13, v0.b[2]
; CHECK-NEXT: eor w14, w15, w14
; CHECK-NEXT: umov w12, v0.b[3]
; CHECK-NEXT: eor w13, w14, w13
; CHECK-NEXT: umov w11, v0.b[4]
; CHECK-NEXT: eor w12, w13, w12
; CHECK-NEXT: umov w10, v0.b[5]
; CHECK-NEXT: eor w11, w12, w11
; CHECK-NEXT: umov w9, v0.b[6]
; CHECK-NEXT: eor w10, w11, w10
; CHECK-NEXT: umov w8, v0.b[7]
; CHECK-NEXT: eor w9, w10, w9
; CHECK-NEXT: eor w0, w9, w8
; CHECK-NEXT: ret
%xor_result = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> %a)
ret i8 %xor_result
}
; v16i8: the 128-bit vector is halved once in the vector unit (ext moves the
; high 64 bits down, then a 64-bit eor), and the surviving eight byte lanes
; are folded in GPRs.
define i8 @test_redxor_v16i8(<16 x i8> %a) {
; CHECK-LABEL: test_redxor_v16i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b
; CHECK-NEXT: umov w8, v0.b[1]
; CHECK-NEXT: umov w9, v0.b[0]
; CHECK-NEXT: eor w8, w9, w8
; CHECK-NEXT: umov w9, v0.b[2]
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[3]
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[4]
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[5]
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[6]
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[7]
; CHECK-NEXT: eor w0, w8, w9
; CHECK-NEXT: ret
%xor_result = call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> %a)
ret i8 %xor_result
}
; v32i8 arrives in two q registers: one full-width vector eor combines them,
; then the lowering proceeds exactly like the v16i8 case (halve with ext,
; fold the remaining byte lanes in GPRs).
define i8 @test_redxor_v32i8(<32 x i8> %a) {
; CHECK-LABEL: test_redxor_v32i8:
; CHECK: // %bb.0:
; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b
; CHECK-NEXT: umov w8, v0.b[1]
; CHECK-NEXT: umov w9, v0.b[0]
; CHECK-NEXT: eor w8, w9, w8
; CHECK-NEXT: umov w9, v0.b[2]
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[3]
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[4]
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[5]
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[6]
; CHECK-NEXT: eor w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[7]
; CHECK-NEXT: eor w0, w8, w9
; CHECK-NEXT: ret
%xor_result = call i8 @llvm.vector.reduce.xor.v32i8(<32 x i8> %a)
ret i8 %xor_result
}
; v4i16: each halfword lane is extracted with umov and the four values are
; folded with a chain of scalar eors.
define i16 @test_redxor_v4i16(<4 x i16> %a) {
; CHECK-LABEL: test_redxor_v4i16:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: umov w10, v0.h[1]
; CHECK-NEXT: umov w11, v0.h[0]
; CHECK-NEXT: umov w9, v0.h[2]
; CHECK-NEXT: eor w10, w11, w10
; CHECK-NEXT: umov w8, v0.h[3]
; CHECK-NEXT: eor w9, w10, w9
; CHECK-NEXT: eor w0, w9, w8
; CHECK-NEXT: ret
%xor_result = call i16 @llvm.vector.reduce.xor.v4i16(<4 x i16> %a)
ret i16 %xor_result
}
; v8i16: the 128-bit vector is halved once with ext + 64-bit eor, then the
; four remaining halfword lanes are folded in GPRs.
define i16 @test_redxor_v8i16(<8 x i16> %a) {
; CHECK-LABEL: test_redxor_v8i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b
; CHECK-NEXT: umov w8, v0.h[1]
; CHECK-NEXT: umov w9, v0.h[0]
; CHECK-NEXT: umov w10, v0.h[2]
; CHECK-NEXT: eor w8, w9, w8
; CHECK-NEXT: eor w8, w8, w10
; CHECK-NEXT: umov w9, v0.h[3]
; CHECK-NEXT: eor w0, w8, w9
; CHECK-NEXT: ret
%xor_result = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> %a)
ret i16 %xor_result
}
; v16i16 arrives in two q registers: a full-width vector eor combines them
; first, then the lowering matches the v8i16 case above.
define i16 @test_redxor_v16i16(<16 x i16> %a) {
; CHECK-LABEL: test_redxor_v16i16:
; CHECK: // %bb.0:
; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b
; CHECK-NEXT: umov w8, v0.h[1]
; CHECK-NEXT: umov w9, v0.h[0]
; CHECK-NEXT: umov w10, v0.h[2]
; CHECK-NEXT: eor w8, w9, w8
; CHECK-NEXT: eor w8, w8, w10
; CHECK-NEXT: umov w9, v0.h[3]
; CHECK-NEXT: eor w0, w8, w9
; CHECK-NEXT: ret
%xor_result = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> %a)
ret i16 %xor_result
}
; v2i32: both word lanes are moved to GPRs and combined with one scalar eor.
define i32 @test_redxor_v2i32(<2 x i32> %a) {
; CHECK-LABEL: test_redxor_v2i32:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: mov w8, v0.s[1]
; CHECK-NEXT: fmov w9, s0
; CHECK-NEXT: eor w0, w9, w8
; CHECK-NEXT: ret
%xor_result = call i32 @llvm.vector.reduce.xor.v2i32(<2 x i32> %a)
ret i32 %xor_result
}
; v4i32: one ext + 64-bit eor halves the vector, then the last two word
; lanes are combined with a scalar eor.
define i32 @test_redxor_v4i32(<4 x i32> %a) {
; CHECK-LABEL: test_redxor_v4i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b
; CHECK-NEXT: mov w8, v0.s[1]
; CHECK-NEXT: fmov w9, s0
; CHECK-NEXT: eor w0, w9, w8
; CHECK-NEXT: ret
%xor_result = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %a)
ret i32 %xor_result
}
; v8i32 arrives in two q registers: a full-width vector eor combines them,
; then the lowering matches the v4i32 case above.
define i32 @test_redxor_v8i32(<8 x i32> %a) {
; CHECK-LABEL: test_redxor_v8i32:
; CHECK: // %bb.0:
; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b
; CHECK-NEXT: mov w8, v0.s[1]
; CHECK-NEXT: fmov w9, s0
; CHECK-NEXT: eor w0, w9, w8
; CHECK-NEXT: ret
%xor_result = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %a)
ret i32 %xor_result
}
; v2i64: a single ext + 64-bit eor folds the two lanes entirely in the
; vector unit; fmov moves the 64-bit result to the return register.
define i64 @test_redxor_v2i64(<2 x i64> %a) {
; CHECK-LABEL: test_redxor_v2i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b
; CHECK-NEXT: fmov x0, d0
; CHECK-NEXT: ret
%xor_result = call i64 @llvm.vector.reduce.xor.v2i64(<2 x i64> %a)
ret i64 %xor_result
}
; v4i64 arrives in two q registers: a full-width vector eor combines them,
; then the lowering matches the v2i64 case above.
define i64 @test_redxor_v4i64(<4 x i64> %a) {
; CHECK-LABEL: test_redxor_v4i64:
; CHECK: // %bb.0:
; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b
; CHECK-NEXT: fmov x0, d0
; CHECK-NEXT: ret
%xor_result = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %a)
ret i64 %xor_result
}
; Declarations of the llvm.vector.reduce.xor intrinsics exercised above.
declare i1 @llvm.vector.reduce.xor.v1i1(<1 x i1>)
declare i1 @llvm.vector.reduce.xor.v2i1(<2 x i1>)
declare i1 @llvm.vector.reduce.xor.v4i1(<4 x i1>)
declare i1 @llvm.vector.reduce.xor.v8i1(<8 x i1>)
declare i1 @llvm.vector.reduce.xor.v16i1(<16 x i1>)
declare i64 @llvm.vector.reduce.xor.v2i64(<2 x i64>)
declare i64 @llvm.vector.reduce.xor.v4i64(<4 x i64>)
declare i32 @llvm.vector.reduce.xor.v2i32(<2 x i32>)
declare i32 @llvm.vector.reduce.xor.v4i32(<4 x i32>)
declare i32 @llvm.vector.reduce.xor.v8i32(<8 x i32>)
declare i16 @llvm.vector.reduce.xor.v4i16(<4 x i16>)
declare i16 @llvm.vector.reduce.xor.v8i16(<8 x i16>)
declare i16 @llvm.vector.reduce.xor.v16i16(<16 x i16>)
declare i8 @llvm.vector.reduce.xor.v1i8(<1 x i8>)
declare i8 @llvm.vector.reduce.xor.v3i8(<3 x i8>)
declare i8 @llvm.vector.reduce.xor.v4i8(<4 x i8>)
declare i8 @llvm.vector.reduce.xor.v8i8(<8 x i8>)
declare i8 @llvm.vector.reduce.xor.v16i8(<16 x i8>)
declare i8 @llvm.vector.reduce.xor.v32i8(<32 x i8>)