| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py |
| ; RUN: opt < %s -passes=instsimplify -S | FileCheck %s |
| ; RUN: opt < %s -passes=instsimplify -use-constant-int-for-fixed-length-splat -use-constant-int-for-scalable-splat -S | FileCheck %s |
| |
; Folding of llvm.vector.reduce.add over constant vectors. Fixed-width constant
; operands fold to the scalar sum; scalable non-zero splats, undef inputs, and
; constant expressions are expected NOT to fold (their CHECK lines keep the call).
define i32 @add_0() {
; CHECK-LABEL: @add_0(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @add_0_scalable_vector() {
; CHECK-LABEL: @add_0_scalable_vector(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @add_1() {
; CHECK-LABEL: @add_1(
; CHECK-NEXT: ret i32 8
;
%x = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @add_1_scalable_vector() {
; CHECK-LABEL: @add_1_scalable_vector(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> splat (i32 1))
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> splat (i32 1))
ret i32 %x
}

define i32 @add_inc() {
; CHECK-LABEL: @add_inc(
; CHECK-NEXT: ret i32 18
;
%x = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> <i32 1, i32 -3, i32 5, i32 7, i32 2, i32 4, i32 -6, i32 8>)
ret i32 %x
}

define i32 @add_1v() {
; CHECK-LABEL: @add_1v(
; CHECK-NEXT: ret i32 10
;
%x = call i32 @llvm.vector.reduce.add.v1i32(<1 x i32> <i32 10>)
ret i32 %x
}

define i32 @add_undef() {
; CHECK-LABEL: @add_undef(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> undef)
ret i32 %x
}

define i32 @add_undef_scalable_vector() {
; CHECK-LABEL: @add_undef_scalable_vector(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> undef)
ret i32 %x
}

define i32 @add_undef_elt() {
; CHECK-LABEL: @add_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @add_poison() {
; CHECK-LABEL: @add_poison(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> poison)
ret i32 %x
}

define i32 @add_poison_scalable_vector() {
; CHECK-LABEL: @add_poison_scalable_vector(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> poison)
ret i32 %x
}

define i32 @add_poison_elt() {
; CHECK-LABEL: @add_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> <i32 1, i32 1, i32 poison, i32 1, i32 1, i32 42, i32 1, i32 1>)
ret i32 %x
}

define i32 @add_constexpr() {
; CHECK-LABEL: @add_constexpr(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> bitcast (<4 x i64> <i64 0, i64 1, i64 2, i64 3> to <8 x i32>))
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> bitcast (<4 x i64> <i64 0, i64 1, i64 2, i64 3> to <8 x i32>))
ret i32 %x
}
| |
; Folding of llvm.vector.reduce.mul over constant vectors. A scalable splat of 1
; folds (1*1*... = 1 for any element count), but a splat of 2 does not, since
; the result depends on the unknown vscale.
define i32 @mul_0() {
; CHECK-LABEL: @mul_0(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @mul_0_scalable_vector() {
; CHECK-LABEL: @mul_0_scalable_vector(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.mul.nxv8i32(<vscale x 8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @mul_1() {
; CHECK-LABEL: @mul_1(
; CHECK-NEXT: ret i32 1
;
%x = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @mul_1_scalable_vector() {
; CHECK-LABEL: @mul_1_scalable_vector(
; CHECK-NEXT: ret i32 1
;
%x = call i32 @llvm.vector.reduce.mul.nxv8i32(<vscale x 8 x i32> splat (i32 1))
ret i32 %x
}

define i32 @mul_2() {
; CHECK-LABEL: @mul_2(
; CHECK-NEXT: ret i32 256
;
%x = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>)
ret i32 %x
}

define i32 @mul_2_scalable_vector() {
; CHECK-LABEL: @mul_2_scalable_vector(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.mul.nxv8i32(<vscale x 8 x i32> splat (i32 2))
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.mul.nxv8i32(<vscale x 8 x i32> splat (i32 2))
ret i32 %x
}

define i32 @mul_inc() {
; CHECK-LABEL: @mul_inc(
; CHECK-NEXT: ret i32 40320
;
%x = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> <i32 1, i32 -3, i32 5, i32 7, i32 2, i32 4, i32 -6, i32 8>)
ret i32 %x
}

define i32 @mul_1v() {
; CHECK-LABEL: @mul_1v(
; CHECK-NEXT: ret i32 10
;
%x = call i32 @llvm.vector.reduce.mul.v1i32(<1 x i32> <i32 10>)
ret i32 %x
}

define i32 @mul_undef() {
; CHECK-LABEL: @mul_undef(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> undef)
ret i32 %x
}

define i32 @mul_undef_scalable_vector() {
; CHECK-LABEL: @mul_undef_scalable_vector(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.mul.nxv8i32(<vscale x 8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.mul.nxv8i32(<vscale x 8 x i32> undef)
ret i32 %x
}

define i32 @mul_undef_elt() {
; CHECK-LABEL: @mul_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @mul_poison() {
; CHECK-LABEL: @mul_poison(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> poison)
ret i32 %x
}

define i32 @mul_poison_scalable_vector() {
; CHECK-LABEL: @mul_poison_scalable_vector(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.mul.nxv8i32(<vscale x 8 x i32> poison)
ret i32 %x
}

define i32 @mul_poison_elt() {
; CHECK-LABEL: @mul_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> <i32 0, i32 1, i32 poison, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}
| |
; Folding of llvm.vector.reduce.and over constant vectors. Scalable splats fold
; (AND of identical lanes is lane-count independent); undef inputs do not.
define i32 @and_0() {
; CHECK-LABEL: @and_0(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @and_0_scalable_vector() {
; CHECK-LABEL: @and_0_scalable_vector(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @and_1() {
; CHECK-LABEL: @and_1(
; CHECK-NEXT: ret i32 1
;
%x = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @and_1_scalable_vector() {
; CHECK-LABEL: @and_1_scalable_vector(
; CHECK-NEXT: ret i32 1
;
%x = call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> splat (i32 1))
ret i32 %x
}

define i32 @and_inc() {
; CHECK-LABEL: @and_inc(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> <i32 1, i32 -3, i32 5, i32 7, i32 2, i32 4, i32 -6, i32 8>)
ret i32 %x
}

define i32 @and_1v() {
; CHECK-LABEL: @and_1v(
; CHECK-NEXT: ret i32 10
;
%x = call i32 @llvm.vector.reduce.and.v1i32(<1 x i32> <i32 10>)
ret i32 %x
}

define i32 @and_undef() {
; CHECK-LABEL: @and_undef(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> undef)
ret i32 %x
}

define i32 @and_undef_scalable_vector() {
; CHECK-LABEL: @and_undef_scalable_vector(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> undef)
ret i32 %x
}

define i32 @and_undef_elt() {
; CHECK-LABEL: @and_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @and_poison() {
; CHECK-LABEL: @and_poison(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> poison)
ret i32 %x
}

define i32 @and_poison_scalable_vector() {
; CHECK-LABEL: @and_poison_scalable_vector(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> poison)
ret i32 %x
}

define i32 @and_poison_elt() {
; CHECK-LABEL: @and_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> <i32 -1, i32 1, i32 poison, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}
| |
; Folding of llvm.vector.reduce.or over constant vectors (fixed-width and
; scalable splats fold; undef inputs do not).
define i32 @or_0() {
; CHECK-LABEL: @or_0(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @or_0_scalable_vector() {
; CHECK-LABEL: @or_0_scalable_vector(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @or_1() {
; CHECK-LABEL: @or_1(
; CHECK-NEXT: ret i32 1
;
%x = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @or_1_scalable_vector() {
; CHECK-LABEL: @or_1_scalable_vector(
; CHECK-NEXT: ret i32 1
;
%x = call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> splat (i32 1))
ret i32 %x
}

define i32 @or_inc() {
; CHECK-LABEL: @or_inc(
; CHECK-NEXT: ret i32 -1
;
%x = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> <i32 1, i32 -3, i32 5, i32 7, i32 2, i32 4, i32 -6, i32 8>)
ret i32 %x
}

define i32 @or_1v() {
; CHECK-LABEL: @or_1v(
; CHECK-NEXT: ret i32 10
;
%x = call i32 @llvm.vector.reduce.or.v1i32(<1 x i32> <i32 10>)
ret i32 %x
}

define i32 @or_undef() {
; CHECK-LABEL: @or_undef(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> undef)
ret i32 %x
}
| |
; Negative test: an or reduction of a scalable undef vector is not folded.
; Fixed the intrinsic mangling from .v8i32 to .nxv8i32 so it matches the
; <vscale x 8 x i32> operand and the autogenerated CHECK line below
; (mismatched intrinsic suffixes are rejected by the IR verifier).
define i32 @or_undef_scalable_vector() {
; CHECK-LABEL: @or_undef_scalable_vector(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> undef)
ret i32 %x
}
| |
; llvm.vector.reduce.or: single undef element blocks the fold; any poison
; element (or a fully poison vector) folds to poison.
define i32 @or_undef_elt() {
; CHECK-LABEL: @or_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @or_poison() {
; CHECK-LABEL: @or_poison(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> poison)
ret i32 %x
}

define i32 @or_poison_scalable_vector() {
; CHECK-LABEL: @or_poison_scalable_vector(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> poison)
ret i32 %x
}

define i32 @or_poison_elt() {
; CHECK-LABEL: @or_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> <i32 1, i32 0, i32 poison, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}
| |
; Folding of llvm.vector.reduce.xor over constant vectors. A scalable splat of 1
; folds to 0 here because nxv8i32 always has an even number of lanes
; (vscale x 8), so the ones cancel pairwise.
define i32 @xor_0() {
; CHECK-LABEL: @xor_0(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @xor_0_scalable_vector() {
; CHECK-LABEL: @xor_0_scalable_vector(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @xor_1() {
; CHECK-LABEL: @xor_1(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @xor_1_scalable_vector() {
; CHECK-LABEL: @xor_1_scalable_vector(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> splat(i32 1))
ret i32 %x
}
| |
; Negative test: a splat-of-1 xor reduction only folds to 0 when the lane count
; is known even; <vscale x 1 x i32> may have an odd count, so no fold.
; Fixed the intrinsic mangling from .nxv8i32 to .nxv1i32 so it matches the
; <vscale x 1 x i32> operand and the autogenerated CHECK line below
; (mismatched intrinsic suffixes are rejected by the IR verifier).
define i32 @xor_1_scalable_vector_lane_count_not_known_even() {
; CHECK-LABEL: @xor_1_scalable_vector_lane_count_not_known_even(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.xor.nxv1i32(<vscale x 1 x i32> splat (i32 1))
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.xor.nxv1i32(<vscale x 1 x i32> splat(i32 1))
ret i32 %x
}
| |
; llvm.vector.reduce.xor: mixed constants fold; undef inputs/elements block the
; fold; poison anywhere folds to poison.
define i32 @xor_inc() {
; CHECK-LABEL: @xor_inc(
; CHECK-NEXT: ret i32 10
;
%x = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> <i32 1, i32 -3, i32 5, i32 7, i32 2, i32 4, i32 -6, i32 8>)
ret i32 %x
}

define i32 @xor_1v() {
; CHECK-LABEL: @xor_1v(
; CHECK-NEXT: ret i32 10
;
%x = call i32 @llvm.vector.reduce.xor.v1i32(<1 x i32> <i32 10>)
ret i32 %x
}

define i32 @xor_undef() {
; CHECK-LABEL: @xor_undef(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> undef)
ret i32 %x
}

define i32 @xor_undef_scalable_vector() {
; CHECK-LABEL: @xor_undef_scalable_vector(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> undef)
ret i32 %x
}

define i32 @xor_undef_elt() {
; CHECK-LABEL: @xor_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @xor_poison() {
; CHECK-LABEL: @xor_poison(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> poison)
ret i32 %x
}

define i32 @xor_poison_scalable_vector() {
; CHECK-LABEL: @xor_poison_scalable_vector(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> poison)
ret i32 %x
}

define i32 @xor_poison_elt() {
; CHECK-LABEL: @xor_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> <i32 poison, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}
| |
; Folding of llvm.vector.reduce.smin over constant vectors (splats fold, the
; minimum of mixed constants folds; undef inputs do not).
define i32 @smin_0() {
; CHECK-LABEL: @smin_0(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @smin_0_scalable_vector() {
; CHECK-LABEL: @smin_0_scalable_vector(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @smin_1() {
; CHECK-LABEL: @smin_1(
; CHECK-NEXT: ret i32 1
;
%x = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @smin_1_scalable_vector() {
; CHECK-LABEL: @smin_1_scalable_vector(
; CHECK-NEXT: ret i32 1
;
%x = call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> splat(i32 1))
ret i32 %x
}

define i32 @smin_inc() {
; CHECK-LABEL: @smin_inc(
; CHECK-NEXT: ret i32 -6
;
%x = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> <i32 1, i32 -3, i32 5, i32 7, i32 2, i32 4, i32 -6, i32 8>)
ret i32 %x
}

define i32 @smin_1v() {
; CHECK-LABEL: @smin_1v(
; CHECK-NEXT: ret i32 10
;
%x = call i32 @llvm.vector.reduce.smin.v1i32(<1 x i32> <i32 10>)
ret i32 %x
}

define i32 @smin_undef() {
; CHECK-LABEL: @smin_undef(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> undef)
ret i32 %x
}

define i32 @smin_undef_scalable_vector() {
; CHECK-LABEL: @smin_undef_scalable_vector(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> undef)
ret i32 %x
}

define i32 @smin_undef_elt() {
; CHECK-LABEL: @smin_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @smin_poison() {
; CHECK-LABEL: @smin_poison(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> poison)
ret i32 %x
}

define i32 @smin_poison_scalable_vector() {
; CHECK-LABEL: @smin_poison_scalable_vector(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> poison)
ret i32 %x
}

define i32 @smin_poison_elt() {
; CHECK-LABEL: @smin_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 poison, i32 1, i32 1, i32 1>)
ret i32 %x
}
| |
; Folding of llvm.vector.reduce.smax over constant vectors (mirror of the smin
; tests above in structure).
define i32 @smax_0() {
; CHECK-LABEL: @smax_0(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @smax_0_scalable_vector() {
; CHECK-LABEL: @smax_0_scalable_vector(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.smax.nxv8i32(<vscale x 8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @smax_1() {
; CHECK-LABEL: @smax_1(
; CHECK-NEXT: ret i32 1
;
%x = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @smax_1_scalable_vector() {
; CHECK-LABEL: @smax_1_scalable_vector(
; CHECK-NEXT: ret i32 1
;
%x = call i32 @llvm.vector.reduce.smax.nxv8i32(<vscale x 8 x i32> splat(i32 1))
ret i32 %x
}

define i32 @smax_inc() {
; CHECK-LABEL: @smax_inc(
; CHECK-NEXT: ret i32 8
;
%x = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> <i32 1, i32 -3, i32 5, i32 7, i32 2, i32 4, i32 -6, i32 8>)
ret i32 %x
}

define i32 @smax_1v() {
; CHECK-LABEL: @smax_1v(
; CHECK-NEXT: ret i32 10
;
%x = call i32 @llvm.vector.reduce.smax.v1i32(<1 x i32> <i32 10>)
ret i32 %x
}

define i32 @smax_undef() {
; CHECK-LABEL: @smax_undef(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> undef)
ret i32 %x
}

define i32 @smax_undef_scalable_vector() {
; CHECK-LABEL: @smax_undef_scalable_vector(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.smax.nxv8i32(<vscale x 8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.smax.nxv8i32(<vscale x 8 x i32> undef)
ret i32 %x
}

define i32 @smax_undef_elt() {
; CHECK-LABEL: @smax_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @smax_poison() {
; CHECK-LABEL: @smax_poison(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> poison)
ret i32 %x
}

define i32 @smax_poison_scalable_vector() {
; CHECK-LABEL: @smax_poison_scalable_vector(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.smax.nxv8i32(<vscale x 8 x i32> poison)
ret i32 %x
}

define i32 @smax_poison_elt() {
; CHECK-LABEL: @smax_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> <i32 1, i32 1, i32 0, i32 1, i32 1, i32 1, i32 1, i32 poison>)
ret i32 %x
}
| |
; Folding of llvm.vector.reduce.umin over constant vectors (unsigned compare,
; so -3/-6 are large values and do not win the minimum in @umin_inc).
define i32 @umin_0() {
; CHECK-LABEL: @umin_0(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @umin_0_scalable_vector() {
; CHECK-LABEL: @umin_0_scalable_vector(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.umin.nxv8i32(<vscale x 8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @umin_1() {
; CHECK-LABEL: @umin_1(
; CHECK-NEXT: ret i32 1
;
%x = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @umin_1_scalable_vector() {
; CHECK-LABEL: @umin_1_scalable_vector(
; CHECK-NEXT: ret i32 1
;
%x = call i32 @llvm.vector.reduce.umin.nxv8i32(<vscale x 8 x i32> splat (i32 1))
ret i32 %x
}

define i32 @umin_inc() {
; CHECK-LABEL: @umin_inc(
; CHECK-NEXT: ret i32 1
;
%x = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> <i32 1, i32 -3, i32 5, i32 7, i32 2, i32 4, i32 -6, i32 8>)
ret i32 %x
}

define i32 @umin_1v() {
; CHECK-LABEL: @umin_1v(
; CHECK-NEXT: ret i32 10
;
%x = call i32 @llvm.vector.reduce.umin.v1i32(<1 x i32> <i32 10>)
ret i32 %x
}

define i32 @umin_undef() {
; CHECK-LABEL: @umin_undef(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> undef)
ret i32 %x
}

define i32 @umin_undef_scalable_vector() {
; CHECK-LABEL: @umin_undef_scalable_vector(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.umin.nxv8i32(<vscale x 8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.umin.nxv8i32(<vscale x 8 x i32> undef)
ret i32 %x
}

define i32 @umin_undef_elt() {
; CHECK-LABEL: @umin_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @umin_poison() {
; CHECK-LABEL: @umin_poison(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> poison)
ret i32 %x
}

define i32 @umin_poison_scalable_vector() {
; CHECK-LABEL: @umin_poison_scalable_vector(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.umin.nxv8i32(<vscale x 8 x i32> poison)
ret i32 %x
}

define i32 @umin_poison_elt() {
; CHECK-LABEL: @umin_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> <i32 1, i32 1, i32 -1, i32 poison, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}
| |
; Folding of llvm.vector.reduce.umax over constant vectors (unsigned compare,
; so -3 is the largest lane in @umax_inc and the fold yields -3).
define i32 @umax_0() {
; CHECK-LABEL: @umax_0(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @umax_0_scalable_vector() {
; CHECK-LABEL: @umax_0_scalable_vector(
; CHECK-NEXT: ret i32 0
;
%x = call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> zeroinitializer)
ret i32 %x
}

define i32 @umax_1() {
; CHECK-LABEL: @umax_1(
; CHECK-NEXT: ret i32 1
;
%x = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @umax_1_scalable_vector() {
; CHECK-LABEL: @umax_1_scalable_vector(
; CHECK-NEXT: ret i32 1
;
%x = call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> splat(i32 1))
ret i32 %x
}

define i32 @umax_inc() {
; CHECK-LABEL: @umax_inc(
; CHECK-NEXT: ret i32 -3
;
%x = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> <i32 1, i32 -3, i32 5, i32 7, i32 2, i32 4, i32 -6, i32 8>)
ret i32 %x
}

define i32 @umax_1v() {
; CHECK-LABEL: @umax_1v(
; CHECK-NEXT: ret i32 10
;
%x = call i32 @llvm.vector.reduce.umax.v1i32(<1 x i32> <i32 10>)
ret i32 %x
}

define i32 @umax_undef() {
; CHECK-LABEL: @umax_undef(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> undef)
ret i32 %x
}

define i32 @umax_undef_scalable_vector() {
; CHECK-LABEL: @umax_undef_scalable_vector(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> undef)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> undef)
ret i32 %x
}

define i32 @umax_undef_elt() {
; CHECK-LABEL: @umax_undef_elt(
; CHECK-NEXT: [[X:%.*]] = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
; CHECK-NEXT: ret i32 [[X]]
;
%x = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> <i32 1, i32 1, i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1>)
ret i32 %x
}

define i32 @umax_poison() {
; CHECK-LABEL: @umax_poison(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> poison)
ret i32 %x
}

define i32 @umax_poison_scalable_vector() {
; CHECK-LABEL: @umax_poison_scalable_vector(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> poison)
ret i32 %x
}

define i32 @umax_poison_elt() {
; CHECK-LABEL: @umax_poison_elt(
; CHECK-NEXT: ret i32 poison
;
%x = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> <i32 1, i32 1, i32 poison, i32 1, i32 1, i32 poison, i32 1, i32 1>)
ret i32 %x
}