| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py |
| ; RUN: opt -passes=instsimplify < %s -S | FileCheck %s |
| |
| declare void @llvm.assume(i1) |
| declare i8 @llvm.abs.i8(i8, i1) |
| declare i8 @llvm.bitreverse.i8(i8) |
| declare i16 @llvm.bswap.i16(i16) |
| declare i8 @llvm.ctpop.i8(i8) |
| declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>) |
| declare i8 @llvm.uadd.sat.i8(i8, i8) |
| declare i8 @llvm.fshr.i8(i8, i8, i8) |
| declare i8 @llvm.fshl.i8(i8, i8, i8) |
| declare i8 @llvm.ctlz.i8(i8, i1) |
| declare i8 @llvm.cttz.i8(i8, i1) |
| declare i8 @llvm.sadd.sat.i8(i8, i8) |
| declare i8 @llvm.smax.i8(i8, i8) |
| declare i8 @llvm.smin.i8(i8, i8) |
| declare i8 @llvm.sshl.sat.i8(i8, i8) |
| declare i8 @llvm.ssub.sat.i8(i8, i8) |
| declare i8 @llvm.umax.i8(i8, i8) |
| declare i8 @llvm.umin.i8(i8, i8) |
| declare i8 @llvm.ushl.sat.i8(i8, i8) |
| declare i8 @llvm.usub.sat.i8(i8, i8) |
| declare float @llvm.maximum.f32(float, float) |
| |
| ;; Throughout, the tests use the pattern `X > Y || Y == 0`, which folds to |
| ;; `X > Y` iff X is known non-zero: when Y == 0, any non-zero X already |
| ;; satisfies `X u> Y`, so the `Y == 0` clause is redundant. This pattern is |
| ;; used because many of the expressions already have hardcoded cases for |
| ;; folding `Foo(X) == 0 -> X == 0`, and we want to test explicitly that |
| ;; `isKnownNonZero` works. |
| |
| define i1 @check_neg(i8 %x, i8 %y) { |
| ; CHECK-LABEL: @check_neg( |
| ; CHECK-NEXT: [[NE:%.*]] = icmp ne i8 [[X:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[NE]]) |
| ; CHECK-NEXT: [[Z:%.*]] = sub i8 0, [[X]] |
| ; CHECK-NEXT: [[CMP0:%.*]] = icmp ugt i8 [[Z]], [[Y:%.*]] |
| ; CHECK-NEXT: ret i1 [[CMP0]] |
| ; |
| %ne = icmp ne i8 %x, 0 |
| call void @llvm.assume(i1 %ne) |
| %z = sub i8 0, %x |
| %cmp0 = icmp ugt i8 %z, %y |
| %cmp1 = icmp eq i8 %y, 0 |
| %r = or i1 %cmp0, %cmp1 |
| ret i1 %r |
| } |
| |
| define i1 @check_abs(i8 %x, i8 %y) { |
| ; CHECK-LABEL: @check_abs( |
| ; CHECK-NEXT: [[NE:%.*]] = icmp ne i8 [[X:%.*]], 0 |
| ; CHECK-NEXT: br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]] |
| ; CHECK: true: |
| ; CHECK-NEXT: [[Z:%.*]] = call i8 @llvm.abs.i8(i8 [[X]], i1 true) |
| ; CHECK-NEXT: [[CMP0:%.*]] = icmp ugt i8 [[Z]], [[Y:%.*]] |
| ; CHECK-NEXT: ret i1 [[CMP0]] |
| ; CHECK: false: |
| ; CHECK-NEXT: ret i1 [[NE]] |
| ; |
| %ne = icmp ne i8 %x, 0 |
| br i1 %ne, label %true, label %false |
| true: |
| %z = call i8 @llvm.abs.i8(i8 %x, i1 true) |
| %cmp0 = icmp ugt i8 %z, %y |
| %cmp1 = icmp eq i8 %y, 0 |
| %r = or i1 %cmp0, %cmp1 |
| ret i1 %r |
| false: |
| ret i1 %ne |
| } |
| |
| define i1 @check_abs_failish(i8 %x, i8 %y) { |
| ; CHECK-LABEL: @check_abs_failish( |
| ; CHECK-NEXT: [[NE:%.*]] = icmp ne i8 [[X:%.*]], 0 |
| ; CHECK-NEXT: br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]] |
| ; CHECK: false: |
| ; CHECK-NEXT: [[Z:%.*]] = call i8 @llvm.abs.i8(i8 [[X]], i1 true) |
| ; CHECK-NEXT: [[CMP0:%.*]] = icmp ugt i8 [[Z]], [[Y:%.*]] |
| ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[Y]], 0 |
| ; CHECK-NEXT: [[R:%.*]] = or i1 [[CMP0]], [[CMP1]] |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; CHECK: true: |
| ; CHECK-NEXT: ret i1 [[NE]] |
| ; |
| %ne = icmp ne i8 %x, 0 |
| br i1 %ne, label %true, label %false |
| false: |
| %z = call i8 @llvm.abs.i8(i8 %x, i1 true) |
| %cmp0 = icmp ugt i8 %z, %y |
| %cmp1 = icmp eq i8 %y, 0 |
| %r = or i1 %cmp0, %cmp1 |
| ret i1 %r |
| true: |
| ret i1 %ne |
| } |
| |
| define i1 @check_bitreverse(i8 %x, i8 %y) { |
| ; CHECK-LABEL: @check_bitreverse( |
| ; CHECK-NEXT: [[NE:%.*]] = icmp ne i8 [[X:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[NE]]) |
| ; CHECK-NEXT: [[Z:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[X]]) |
| ; CHECK-NEXT: [[CMP0:%.*]] = icmp ugt i8 [[Z]], [[Y:%.*]] |
| ; CHECK-NEXT: ret i1 [[CMP0]] |
| ; |
| %ne = icmp ne i8 %x, 0 |
| call void @llvm.assume(i1 %ne) |
| %z = call i8 @llvm.bitreverse.i8(i8 %x) |
| %cmp0 = icmp ugt i8 %z, %y |
| %cmp1 = icmp eq i8 %y, 0 |
| %r = or i1 %cmp0, %cmp1 |
| ret i1 %r |
| } |
| |
| define i1 @check_bswap(i16 %x, i16 %y) { |
| ; CHECK-LABEL: @check_bswap( |
| ; CHECK-NEXT: [[NE:%.*]] = icmp ne i16 [[X:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[NE]]) |
| ; CHECK-NEXT: [[Z:%.*]] = call i16 @llvm.bswap.i16(i16 [[X]]) |
| ; CHECK-NEXT: [[CMP0:%.*]] = icmp ugt i16 [[Z]], [[Y:%.*]] |
| ; CHECK-NEXT: ret i1 [[CMP0]] |
| ; |
| %ne = icmp ne i16 %x, 0 |
| call void @llvm.assume(i1 %ne) |
| %z = call i16 @llvm.bswap.i16(i16 %x) |
| %cmp0 = icmp ugt i16 %z, %y |
| %cmp1 = icmp eq i16 %y, 0 |
| %r = or i1 %cmp0, %cmp1 |
| ret i1 %r |
| } |
| |
| define i1 @check_ctpop(i8 %x, i8 %y) { |
| ; CHECK-LABEL: @check_ctpop( |
| ; CHECK-NEXT: [[NE:%.*]] = icmp eq i8 [[X:%.*]], 0 |
| ; CHECK-NEXT: br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]] |
| ; CHECK: true: |
| ; CHECK-NEXT: ret i1 [[NE]] |
| ; CHECK: false: |
| ; CHECK-NEXT: [[Z:%.*]] = call i8 @llvm.ctpop.i8(i8 [[X]]) |
| ; CHECK-NEXT: [[CMP0:%.*]] = icmp ugt i8 [[Z]], [[Y:%.*]] |
| ; CHECK-NEXT: ret i1 [[CMP0]] |
| ; |
| %ne = icmp eq i8 %x, 0 |
| br i1 %ne, label %true, label %false |
| true: |
| ret i1 %ne |
| false: |
| %z = call i8 @llvm.ctpop.i8(i8 %x) |
| %cmp0 = icmp ugt i8 %z, %y |
| %cmp1 = icmp eq i8 %y, 0 |
| %r = or i1 %cmp0, %cmp1 |
| ret i1 %r |
| } |
| |
| define i1 @check_add_sat(i8 %x, i8 %y, i8 %w) { |
| ; CHECK-LABEL: @check_add_sat( |
| ; CHECK-NEXT: [[NE:%.*]] = icmp ne i8 [[X:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[NE]]) |
| ; CHECK-NEXT: [[Z:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[X]], i8 [[Y:%.*]]) |
| ; CHECK-NEXT: [[CMP0:%.*]] = icmp ugt i8 [[Z]], [[W:%.*]] |
| ; CHECK-NEXT: ret i1 [[CMP0]] |
| ; |
| %ne = icmp ne i8 %x, 0 |
| call void @llvm.assume(i1 %ne) |
| %z = call i8 @llvm.uadd.sat.i8(i8 %x, i8 %y) |
| %cmp0 = icmp ugt i8 %z, %w |
| %cmp1 = icmp eq i8 %w, 0 |
| %r = or i1 %cmp0, %cmp1 |
| ret i1 %r |
| } |
| |
| define <2 x i1> @check_add_sat_vec(<2 x i8> %x, <2 x i8> %y, <2 x i8> %w) { |
| ; CHECK-LABEL: @check_add_sat_vec( |
| ; CHECK-NEXT: [[YNZ:%.*]] = or <2 x i8> [[Y:%.*]], <i8 2, i8 1> |
| ; CHECK-NEXT: [[Z:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[X:%.*]], <2 x i8> [[YNZ]]) |
| ; CHECK-NEXT: [[CMP0:%.*]] = icmp ugt <2 x i8> [[Z]], [[W:%.*]] |
| ; CHECK-NEXT: ret <2 x i1> [[CMP0]] |
| ; |
| %ynz = or <2 x i8> %y, <i8 2, i8 1> |
| %z = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %x, <2 x i8> %ynz) |
| %cmp0 = icmp ugt <2 x i8> %z, %w |
| %cmp1 = icmp eq <2 x i8> %w, <i8 0, i8 0> |
| %r = or <2 x i1> %cmp0, %cmp1 |
| ret <2 x i1> %r |
| } |
| |
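| ;; Shifts of a known-non-zero value stay non-zero when the possible shift |
| ;; amounts can only shift out bits that are known zero, or when shifting them |
| ;; out would violate an nsw/nuw flag. |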
| define <2 x i1> @shl_nz_bounded_cnt_vec(<2 x i32> %x, <2 x i32> %y) { |
| ; CHECK-LABEL: @shl_nz_bounded_cnt_vec( |
| ; CHECK-NEXT: ret <2 x i1> zeroinitializer |
| ; |
| %cnt = and <2 x i32> %x, <i32 16, i32 24> |
| %val = or <2 x i32> %y, <i32 131088, i32 16> |
| %shl = shl <2 x i32> %val, %cnt |
| %r = icmp eq <2 x i32> %shl, zeroinitializer |
| ret <2 x i1> %r |
| } |
| |
| define i1 @shl_nz_bounded_cnt(i32 %cnt, i32 %y) { |
| ; CHECK-LABEL: @shl_nz_bounded_cnt( |
| ; CHECK-NEXT: [[CNT_ULT4:%.*]] = icmp ult i32 [[CNT:%.*]], 4 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[CNT_ULT4]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %cnt_ult4 = icmp ult i32 %cnt, 4 |
| call void @llvm.assume(i1 %cnt_ult4) |
| %val = or i32 %y, 131072 |
| %shl = shl i32 %val, %cnt |
| %r = icmp eq i32 %shl, 0 |
| ret i1 %r |
| } |
| |
| define <2 x i1> @shl_nz_bounded_cnt_vec_todo_no_common_bit(<2 x i32> %x, <2 x i32> %y) { |
| ; CHECK-LABEL: @shl_nz_bounded_cnt_vec_todo_no_common_bit( |
| ; CHECK-NEXT: [[CNT:%.*]] = and <2 x i32> [[X:%.*]], <i32 16, i32 32> |
| ; CHECK-NEXT: [[VAL:%.*]] = or <2 x i32> [[Y:%.*]], splat (i32 16) |
| ; CHECK-NEXT: [[SHL:%.*]] = shl <2 x i32> [[VAL]], [[CNT]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i32> [[SHL]], zeroinitializer |
| ; CHECK-NEXT: ret <2 x i1> [[R]] |
| ; |
| %cnt = and <2 x i32> %x, <i32 16, i32 32> |
| %val = or <2 x i32> %y, <i32 16, i32 16> |
| %shl = shl <2 x i32> %val, %cnt |
| %r = icmp eq <2 x i32> %shl, zeroinitializer |
| ret <2 x i1> %r |
| } |
| |
| define i1 @shl_maybe_zero_bounded_cnt_fail(i32 %x, i32 %y) { |
| ; CHECK-LABEL: @shl_maybe_zero_bounded_cnt_fail( |
| ; CHECK-NEXT: [[CNT:%.*]] = and i32 [[X:%.*]], 16 |
| ; CHECK-NEXT: [[VAL:%.*]] = or i32 [[Y:%.*]], 65536 |
| ; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[VAL]], [[CNT]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[SHL]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %cnt = and i32 %x, 16 |
| %val = or i32 %y, 65536 |
| %shl = shl i32 %val, %cnt |
| %r = icmp eq i32 %shl, 0 |
| ret i1 %r |
| } |
| |
| define i1 @shl_non_zero_nsw(i8 %s, i8 %cnt) { |
| ; CHECK-LABEL: @shl_non_zero_nsw( |
| ; CHECK-NEXT: [[NZ:%.*]] = icmp ne i8 [[S:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[NZ]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %nz = icmp ne i8 %s, 0 |
| call void @llvm.assume(i1 %nz) |
| %v = shl nsw i8 %s, %cnt |
| %r = icmp eq i8 %v, 0 |
| ret i1 %r |
| } |
| |
| define i1 @shl_maybe_zero_nsw_fail(i8 %s, i8 %cnt) { |
| ; CHECK-LABEL: @shl_maybe_zero_nsw_fail( |
| ; CHECK-NEXT: [[V:%.*]] = shl nsw i8 [[S:%.*]], [[CNT:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[V]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %v = shl nsw i8 %s, %cnt |
| %r = icmp eq i8 %v, 0 |
| ret i1 %r |
| } |
| |
| define i1 @shl_out_of_range_is_poison(i32 %v, i32 %c) { |
| ; CHECK-LABEL: @shl_out_of_range_is_poison( |
| ; CHECK-NEXT: ret i1 poison |
| ; |
| %sval = or i32 %v, 32 |
| %shl = shl i32 %c, %sval |
| %z = icmp eq i32 %shl, 0 |
| ret i1 %z |
| } |
| |
| define i1 @lshr_nz_bounded_cnt(i32 %cnt, i32 %y) { |
| ; CHECK-LABEL: @lshr_nz_bounded_cnt( |
| ; CHECK-NEXT: [[CNT_ULT4:%.*]] = icmp ult i32 [[CNT:%.*]], 4 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[CNT_ULT4]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %cnt_ult4 = icmp ult i32 %cnt, 4 |
| call void @llvm.assume(i1 %cnt_ult4) |
| %val = or i32 %y, 90 |
| %shl = lshr i32 %val, %cnt |
| %r = icmp eq i32 %shl, 0 |
| ret i1 %r |
| } |
| |
| define <2 x i1> @ashr_nz_bounded_cnt_vec(<2 x i32> %x, <2 x i32> %y) { |
| ; CHECK-LABEL: @ashr_nz_bounded_cnt_vec( |
| ; CHECK-NEXT: ret <2 x i1> zeroinitializer |
| ; |
| %cnt = and <2 x i32> %x, <i32 16, i32 24> |
| %val = or <2 x i32> %y, <i32 402784272, i32 268697601> |
| %shl = ashr <2 x i32> %val, %cnt |
| %r = icmp eq <2 x i32> %shl, zeroinitializer |
| ret <2 x i1> %r |
| } |
| |
| define i1 @lshr_nz_bounded_cnt_fail(i32 %cnt, i32 %y) { |
| ; CHECK-LABEL: @lshr_nz_bounded_cnt_fail( |
| ; CHECK-NEXT: [[CNT_ULT:%.*]] = icmp ult i32 [[CNT:%.*]], 20 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[CNT_ULT]]) |
| ; CHECK-NEXT: [[VAL:%.*]] = or i32 [[Y:%.*]], 131072 |
| ; CHECK-NEXT: [[SHL:%.*]] = lshr i32 [[VAL]], [[CNT]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[SHL]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %cnt_ult = icmp ult i32 %cnt, 20 |
| call void @llvm.assume(i1 %cnt_ult) |
| %val = or i32 %y, 131072 |
| %shl = lshr i32 %val, %cnt |
| %r = icmp eq i32 %shl, 0 |
| ret i1 %r |
| } |
| |
| define <2 x i1> @ashr_nz_bounded_cnt_vec_fail(<2 x i32> %x, <2 x i32> %y) { |
| ; CHECK-LABEL: @ashr_nz_bounded_cnt_vec_fail( |
| ; CHECK-NEXT: [[CNT:%.*]] = and <2 x i32> [[X:%.*]], splat (i32 24) |
| ; CHECK-NEXT: [[VAL:%.*]] = or <2 x i32> [[Y:%.*]], <i32 131088, i32 268697601> |
| ; CHECK-NEXT: [[SHL:%.*]] = ashr <2 x i32> [[VAL]], [[CNT]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i32> [[SHL]], zeroinitializer |
| ; CHECK-NEXT: ret <2 x i1> [[R]] |
| ; |
| %cnt = and <2 x i32> %x, <i32 24, i32 24> |
| %val = or <2 x i32> %y, <i32 131088, i32 268697601> |
| %shl = ashr <2 x i32> %val, %cnt |
| %r = icmp eq <2 x i32> %shl, zeroinitializer |
| ret <2 x i1> %r |
| } |
| |
| define i1 @lshr_nonzero_and_shift_out_zeros(i32 %cnt, i32 %y) { |
| ; CHECK-LABEL: @lshr_nonzero_and_shift_out_zeros( |
| ; CHECK-NEXT: [[CNT_ULT:%.*]] = icmp ult i32 [[CNT:%.*]], 4 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[CNT_ULT]]) |
| ; CHECK-NEXT: [[VAL:%.*]] = and i32 [[Y:%.*]], -131072 |
| ; CHECK-NEXT: [[VAL_NZ:%.*]] = icmp ne i32 [[VAL]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[VAL_NZ]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %cnt_ult = icmp ult i32 %cnt, 4 |
| call void @llvm.assume(i1 %cnt_ult) |
| %val = and i32 %y, -131072 |
| %val_nz = icmp ne i32 %val, 0 |
| call void @llvm.assume(i1 %val_nz) |
| %shl = lshr i32 %val, %cnt |
| %r = icmp eq i32 %shl, 0 |
| ret i1 %r |
| } |
| |
| define i1 @ashr_nonzero_and_shift_out_zeros(i32 %ccnt, i32 %y) { |
| ; CHECK-LABEL: @ashr_nonzero_and_shift_out_zeros( |
| ; CHECK-NEXT: [[VAL:%.*]] = and i32 [[Y:%.*]], -131072 |
| ; CHECK-NEXT: [[VAL_NZ:%.*]] = icmp ne i32 [[VAL]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[VAL_NZ]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %cnt = and i32 %ccnt, 7 |
| %val = and i32 %y, -131072 |
| %val_nz = icmp ne i32 %val, 0 |
| call void @llvm.assume(i1 %val_nz) |
| %shl = ashr i32 %val, %cnt |
| %r = icmp eq i32 %shl, 0 |
| ret i1 %r |
| } |
| |
| define i1 @shl_nonzero_and_shift_out_zeros(i32 %ccnt, i32 %y) { |
| ; CHECK-LABEL: @shl_nonzero_and_shift_out_zeros( |
| ; CHECK-NEXT: [[VAL:%.*]] = and i32 [[Y:%.*]], 131071 |
| ; CHECK-NEXT: [[VAL_NZ:%.*]] = icmp ne i32 [[VAL]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[VAL_NZ]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %cnt = and i32 %ccnt, 6 |
| %val = and i32 %y, 131071 |
| %val_nz = icmp ne i32 %val, 0 |
| call void @llvm.assume(i1 %val_nz) |
| %shl = shl i32 %val, %cnt |
| %r = icmp eq i32 %shl, 0 |
| ret i1 %r |
| } |
| |
| define i1 @lshr_nonzero_and_shift_out_zeros_fail(i32 %cnt, i32 %y) { |
| ; CHECK-LABEL: @lshr_nonzero_and_shift_out_zeros_fail( |
| ; CHECK-NEXT: [[CNT_ULT:%.*]] = icmp ult i32 [[CNT:%.*]], 19 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[CNT_ULT]]) |
| ; CHECK-NEXT: [[VAL:%.*]] = and i32 [[Y:%.*]], -131072 |
| ; CHECK-NEXT: [[VAL_NZ:%.*]] = icmp ne i32 [[VAL]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[VAL_NZ]]) |
| ; CHECK-NEXT: [[SHL:%.*]] = lshr i32 [[VAL]], [[CNT]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[SHL]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %cnt_ult = icmp ult i32 %cnt, 19 |
| call void @llvm.assume(i1 %cnt_ult) |
| %val = and i32 %y, -131072 |
| %val_nz = icmp ne i32 %val, 0 |
| call void @llvm.assume(i1 %val_nz) |
| %shl = lshr i32 %val, %cnt |
| %r = icmp eq i32 %shl, 0 |
| ret i1 %r |
| } |
| |
| define i1 @ashr_nonzero_and_shift_out_zeros_fail(i32 %ccnt, i32 %y) { |
| ; CHECK-LABEL: @ashr_nonzero_and_shift_out_zeros_fail( |
| ; CHECK-NEXT: [[CNT:%.*]] = and i32 [[CCNT:%.*]], 18 |
| ; CHECK-NEXT: [[VAL:%.*]] = and i32 [[Y:%.*]], -131072 |
| ; CHECK-NEXT: [[VAL_NZ:%.*]] = icmp ne i32 [[VAL]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[VAL_NZ]]) |
| ; CHECK-NEXT: [[SHL:%.*]] = ashr i32 [[VAL]], [[CNT]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[SHL]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %cnt = and i32 %ccnt, 18 |
| %val = and i32 %y, -131072 |
| %val_nz = icmp ne i32 %val, 0 |
| call void @llvm.assume(i1 %val_nz) |
| %shl = ashr i32 %val, %cnt |
| %r = icmp eq i32 %shl, 0 |
| ret i1 %r |
| } |
| |
| define i1 @shl_nonzero_and_shift_out_zeros_fail(i32 %ccnt, i32 %y) { |
| ; CHECK-LABEL: @shl_nonzero_and_shift_out_zeros_fail( |
| ; CHECK-NEXT: [[CNT:%.*]] = and i32 [[CCNT:%.*]], 6 |
| ; CHECK-NEXT: [[VAL:%.*]] = and i32 [[Y:%.*]], 268435455 |
| ; CHECK-NEXT: [[VAL_NZ:%.*]] = icmp ne i32 [[VAL]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[VAL_NZ]]) |
| ; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[VAL]], [[CNT]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[SHL]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %cnt = and i32 %ccnt, 6 |
| %val = and i32 %y, 268435455 |
| %val_nz = icmp ne i32 %val, 0 |
| call void @llvm.assume(i1 %val_nz) |
| %shl = shl i32 %val, %cnt |
| %r = icmp eq i32 %shl, 0 |
| ret i1 %r |
| } |
| |
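| ;; sub X, Y is non-zero when known bits prove X != Y (here bit 6 is known |
| ;; clear in X and known set in Y). |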
| define i1 @sub_nonzero_ops_ne(i8 %xx, i8 %yy, i8 %z) { |
| ; CHECK-LABEL: @sub_nonzero_ops_ne( |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %x = and i8 %xx, 191 |
| %y = or i8 %yy, 64 |
| %s = sub i8 %x, %y |
| %exp = or i8 %z, %s |
| %r = icmp eq i8 %exp, 0 |
| ret i1 %r |
| } |
| |
| define i1 @sub_nonzero_ops_ne_fail(i8 %xx, i8 %yy, i8 %z) { |
| ; CHECK-LABEL: @sub_nonzero_ops_ne_fail( |
| ; CHECK-NEXT: [[X:%.*]] = and i8 [[XX:%.*]], -64 |
| ; CHECK-NEXT: [[Y:%.*]] = or i8 [[YY:%.*]], 64 |
| ; CHECK-NEXT: [[S:%.*]] = sub i8 [[X]], [[Y]] |
| ; CHECK-NEXT: [[EXP:%.*]] = or i8 [[Z:%.*]], [[S]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[EXP]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %x = and i8 %xx, 192 |
| %y = or i8 %yy, 64 |
| %s = sub i8 %x, %y |
| %exp = or i8 %z, %s |
| %r = icmp eq i8 %exp, 0 |
| ret i1 %r |
| } |
| |
| define i1 @add_nonzero_nuw(i8 %x, i8 %y) { |
| ; CHECK-LABEL: @add_nonzero_nuw( |
| ; CHECK-NEXT: [[X_NZ:%.*]] = icmp ne i8 [[X:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[X_NZ]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %x_nz = icmp ne i8 %x, 0 |
| call void @llvm.assume(i1 %x_nz) |
| %a = add nuw i8 %x, %y |
| %r = icmp eq i8 %a, 0 |
| ret i1 %r |
| } |
| |
| define i1 @add_nonzero_nsw_fail(i8 %x, i8 %y) { |
| ; CHECK-LABEL: @add_nonzero_nsw_fail( |
| ; CHECK-NEXT: [[X_NZ:%.*]] = icmp ne i8 [[X:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[X_NZ]]) |
| ; CHECK-NEXT: [[A:%.*]] = add nsw i8 [[X]], [[Y:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[A]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %x_nz = icmp ne i8 %x, 0 |
| call void @llvm.assume(i1 %x_nz) |
| %a = add nsw i8 %x, %y |
| %r = icmp eq i8 %a, 0 |
| ret i1 %r |
| } |
| |
| define i1 @udiv_y_le_x(i8 %xx, i8 %yy, i8 %z) { |
| ; CHECK-LABEL: @udiv_y_le_x( |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %x = or i8 %xx, 7 |
| %y = and i8 %yy, 7 |
| %d = udiv i8 %x, %y |
| %o = or i8 %d, %z |
| %r = icmp eq i8 %o, 0 |
| ret i1 %r |
| } |
| |
| define i1 @udiv_y_le_x_fail(i8 %xx, i8 %yy, i8 %z) { |
| ; CHECK-LABEL: @udiv_y_le_x_fail( |
| ; CHECK-NEXT: [[X:%.*]] = or i8 [[XX:%.*]], 6 |
| ; CHECK-NEXT: [[Y:%.*]] = and i8 [[YY:%.*]], 7 |
| ; CHECK-NEXT: [[D:%.*]] = udiv i8 [[X]], [[Y]] |
| ; CHECK-NEXT: [[O:%.*]] = or i8 [[D]], [[Z:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[O]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %x = or i8 %xx, 6 |
| %y = and i8 %yy, 7 |
| %d = udiv i8 %x, %y |
| %o = or i8 %d, %z |
| %r = icmp eq i8 %o, 0 |
| ret i1 %r |
| } |
| |
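| ;; fshr/fshl with the same value for both data operands is a rotate, so a |
| ;; non-zero input gives a non-zero result. |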
| define i1 @fshr_non_zero(i8 %x, i8 %y, i8 %z) { |
| ; CHECK-LABEL: @fshr_non_zero( |
| ; CHECK-NEXT: [[PRED0:%.*]] = icmp ne i8 [[X:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[PRED0]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %pred0 = icmp ne i8 %x, 0 |
| call void @llvm.assume(i1 %pred0) |
| %v = tail call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 %y) |
| %or = or i8 %v, %z |
| %r = icmp eq i8 %or, 0 |
| ret i1 %r |
| } |
| |
| define i1 @fshr_non_zero_fail(i8 %x, i8 %y, i8 %z, i8 %w) { |
| ; CHECK-LABEL: @fshr_non_zero_fail( |
| ; CHECK-NEXT: [[PRED0:%.*]] = icmp ne i8 [[X:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[PRED0]]) |
| ; CHECK-NEXT: [[PRED1:%.*]] = icmp ne i8 [[W:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[PRED1]]) |
| ; CHECK-NEXT: [[V:%.*]] = tail call i8 @llvm.fshr.i8(i8 [[X]], i8 [[W]], i8 [[Y:%.*]]) |
| ; CHECK-NEXT: [[OR:%.*]] = or i8 [[V]], [[Z:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[OR]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %pred0 = icmp ne i8 %x, 0 |
| call void @llvm.assume(i1 %pred0) |
| %pred1 = icmp ne i8 %w, 0 |
| call void @llvm.assume(i1 %pred1) |
| %v = tail call i8 @llvm.fshr.i8(i8 %x, i8 %w, i8 %y) |
| %or = or i8 %v, %z |
| %r = icmp eq i8 %or, 0 |
| ret i1 %r |
| } |
| |
| define i1 @fshl_non_zero(i8 %x, i8 %y, i8 %z) { |
| ; CHECK-LABEL: @fshl_non_zero( |
| ; CHECK-NEXT: [[PRED0:%.*]] = icmp ne i8 [[X:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[PRED0]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %pred0 = icmp ne i8 %x, 0 |
| call void @llvm.assume(i1 %pred0) |
| %v = tail call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %y) |
| %or = or i8 %v, %z |
| %r = icmp eq i8 %or, 0 |
| ret i1 %r |
| } |
| |
| define i1 @fshl_non_zero_fail(i8 %x, i8 %y, i8 %z, i8 %w) { |
| ; CHECK-LABEL: @fshl_non_zero_fail( |
| ; CHECK-NEXT: [[PRED0:%.*]] = icmp ne i8 [[X:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[PRED0]]) |
| ; CHECK-NEXT: [[PRED1:%.*]] = icmp ne i8 [[W:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[PRED1]]) |
| ; CHECK-NEXT: [[V:%.*]] = tail call i8 @llvm.fshl.i8(i8 [[X]], i8 [[W]], i8 [[Y:%.*]]) |
| ; CHECK-NEXT: [[OR:%.*]] = or i8 [[V]], [[Z:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[OR]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %pred0 = icmp ne i8 %x, 0 |
| call void @llvm.assume(i1 %pred0) |
| %pred1 = icmp ne i8 %w, 0 |
| call void @llvm.assume(i1 %pred1) |
| %v = tail call i8 @llvm.fshl.i8(i8 %x, i8 %w, i8 %y) |
| %or = or i8 %v, %z |
| %r = icmp eq i8 %or, 0 |
| ret i1 %r |
| } |
| |
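| ;; A bitcast from a vector whose elements are all non-zero is non-zero, since |
| ;; each source element maps onto a disjoint group of result bits. |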
| define i1 @bitcast_nonzero(<2 x i8> %xx, i16 %ind) { |
| ; CHECK-LABEL: @bitcast_nonzero( |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %xa = add nuw nsw <2 x i8> %xx, <i8 1, i8 1> |
| %x = bitcast <2 x i8> %xa to i16 |
| %z = or i16 %x, %ind |
| %r = icmp eq i16 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @bitcast_todo_partial_nonzero_vec_to_int(<2 x i8> %xx, i16 %ind) { |
| ; CHECK-LABEL: @bitcast_todo_partial_nonzero_vec_to_int( |
| ; CHECK-NEXT: [[XA:%.*]] = add nuw nsw <2 x i8> [[XX:%.*]], <i8 1, i8 0> |
| ; CHECK-NEXT: [[X:%.*]] = bitcast <2 x i8> [[XA]] to i16 |
| ; CHECK-NEXT: [[Z:%.*]] = or i16 [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i16 [[Z]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %xa = add nuw nsw <2 x i8> %xx, <i8 1, i8 0> |
| %x = bitcast <2 x i8> %xa to i16 |
| %z = or i16 %x, %ind |
| %r = icmp eq i16 %z, 0 |
| ret i1 %r |
| } |
| |
| define <2 x i1> @bitcast_fail_nonzero_int_to_vec(i16 %xx, <2 x i8> %ind) { |
| ; CHECK-LABEL: @bitcast_fail_nonzero_int_to_vec( |
| ; CHECK-NEXT: [[XA:%.*]] = add nuw nsw i16 [[XX:%.*]], 1 |
| ; CHECK-NEXT: [[X:%.*]] = bitcast i16 [[XA]] to <2 x i8> |
| ; CHECK-NEXT: [[Z:%.*]] = or <2 x i8> [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i8> [[Z]], zeroinitializer |
| ; CHECK-NEXT: ret <2 x i1> [[R]] |
| ; |
| %xa = add nuw nsw i16 %xx, 1 |
| %x = bitcast i16 %xa to <2 x i8> |
| %z = or <2 x i8> %x, %ind |
| %r = icmp eq <2 x i8> %z, zeroinitializer |
| ret <2 x i1> %r |
| } |
| |
| define <2 x i1> @bitcast_veci8_to_veci16(<4 x i8> %xx, <2 x i16> %ind) { |
| ; CHECK-LABEL: @bitcast_veci8_to_veci16( |
| ; CHECK-NEXT: ret <2 x i1> zeroinitializer |
| ; |
| %xa = add nuw nsw <4 x i8> %xx, <i8 1, i8 1, i8 1, i8 1> |
| %x = bitcast <4 x i8> %xa to <2 x i16> |
| %z = or <2 x i16> %x, %ind |
| %r = icmp eq <2 x i16> %z, zeroinitializer |
| ret <2 x i1> %r |
| } |
| |
| define <3 x i1> @bitcast_veci3_to_veci4_fail_not_multiple(<4 x i3> %xx, <3 x i4> %ind) { |
| ; CHECK-LABEL: @bitcast_veci3_to_veci4_fail_not_multiple( |
| ; CHECK-NEXT: [[XA:%.*]] = add nuw nsw <4 x i3> [[XX:%.*]], splat (i3 1) |
| ; CHECK-NEXT: [[X:%.*]] = bitcast <4 x i3> [[XA]] to <3 x i4> |
| ; CHECK-NEXT: [[Z:%.*]] = or <3 x i4> [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq <3 x i4> [[Z]], zeroinitializer |
| ; CHECK-NEXT: ret <3 x i1> [[R]] |
| ; |
| %xa = add nuw nsw <4 x i3> %xx, <i3 1, i3 1, i3 1, i3 1> |
| %x = bitcast <4 x i3> %xa to <3 x i4> |
| %z = or <3 x i4> %x, %ind |
| %r = icmp eq <3 x i4> %z, zeroinitializer |
| ret <3 x i1> %r |
| } |
| |
| define <4 x i1> @bitcast_fail_veci16_to_veci8(<2 x i16> %xx, <4 x i8> %ind) { |
| ; CHECK-LABEL: @bitcast_fail_veci16_to_veci8( |
| ; CHECK-NEXT: [[XA:%.*]] = add nuw nsw <2 x i16> [[XX:%.*]], splat (i16 1) |
| ; CHECK-NEXT: [[X:%.*]] = bitcast <2 x i16> [[XA]] to <4 x i8> |
| ; CHECK-NEXT: [[Z:%.*]] = or <4 x i8> [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq <4 x i8> [[Z]], zeroinitializer |
| ; CHECK-NEXT: ret <4 x i1> [[R]] |
| ; |
| %xa = add nuw nsw <2 x i16> %xx, <i16 1, i16 1> |
| %x = bitcast <2 x i16> %xa to <4 x i8> |
| %z = or <4 x i8> %x, %ind |
| %r = icmp eq <4 x i8> %z, zeroinitializer |
| ret <4 x i1> %r |
| } |
| |
| define i1 @bitcast_nonzero_fail_dont_check_float(float %xx, i32 %ind) { |
| ; CHECK-LABEL: @bitcast_nonzero_fail_dont_check_float( |
| ; CHECK-NEXT: [[XA:%.*]] = call float @llvm.maximum.f32(float [[XX:%.*]], float 1.000000e+00) |
| ; CHECK-NEXT: [[X:%.*]] = bitcast float [[XA]] to i32 |
| ; CHECK-NEXT: [[Z:%.*]] = or i32 [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[Z]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %xa = call float @llvm.maximum.f32(float %xx, float 1.000000e+00) |
| %x = bitcast float %xa to i32 |
| %z = or i32 %x, %ind |
| %r = icmp eq i32 %z, 0 |
| ret i1 %r |
| } |
| |
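| ;; ctlz is non-zero when the argument's sign bit is known clear; cttz is |
| ;; non-zero when the argument's low bit is known clear. |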
| define i1 @ctlz_true_nonzero(i8 %xx, i8 %ind) { |
| ; CHECK-LABEL: @ctlz_true_nonzero( |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %xs = lshr i8 %xx, 1 |
| %x = call i8 @llvm.ctlz.i8(i8 %xs, i1 true) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @ctlz_false_nonzero(i8 %xx, i8 %ind) { |
| ; CHECK-LABEL: @ctlz_false_nonzero( |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %xa = and i8 %xx, 127 |
| %x = call i8 @llvm.ctlz.i8(i8 %xa, i1 true) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @ctlz_nonzero_fail_maybe_neg(i8 %xx, i8 %ind) { |
| ; CHECK-LABEL: @ctlz_nonzero_fail_maybe_neg( |
| ; CHECK-NEXT: [[XS:%.*]] = ashr i8 [[XX:%.*]], 1 |
| ; CHECK-NEXT: [[X:%.*]] = call i8 @llvm.ctlz.i8(i8 [[XS]], i1 true) |
| ; CHECK-NEXT: [[Z:%.*]] = or i8 [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Z]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %xs = ashr i8 %xx, 1 |
| %x = call i8 @llvm.ctlz.i8(i8 %xs, i1 true) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @cttz_true_nonzero(i8 %xx, i8 %ind) { |
| ; CHECK-LABEL: @cttz_true_nonzero( |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %xs = shl i8 %xx, 1 |
| %x = call i8 @llvm.cttz.i8(i8 %xs, i1 true) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @cttz_false_nonzero(i8 %xx, i8 %ind) { |
| ; CHECK-LABEL: @cttz_false_nonzero( |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %xa = and i8 %xx, -2 |
| %x = call i8 @llvm.cttz.i8(i8 %xa, i1 true) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @cttz_nonzero_fail_maybe_odd(i8 %xx, i8 %cnt, i8 %ind) { |
| ; CHECK-LABEL: @cttz_nonzero_fail_maybe_odd( |
| ; CHECK-NEXT: [[XS:%.*]] = shl i8 [[XX:%.*]], [[CNT:%.*]] |
| ; CHECK-NEXT: [[X:%.*]] = call i8 @llvm.cttz.i8(i8 [[XS]], i1 true) |
| ; CHECK-NEXT: [[Z:%.*]] = or i8 [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Z]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %xs = shl i8 %xx, %cnt |
| %x = call i8 @llvm.cttz.i8(i8 %xs, i1 true) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
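| ;; mul X, Y is non-zero when one operand is known odd and the other is known |
| ;; non-zero. |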
| define i1 @mul_nonzero_odd(i8 %xx, i8 %y, i8 %ind) { |
| ; CHECK-LABEL: @mul_nonzero_odd( |
| ; CHECK-NEXT: [[Y_NZ:%.*]] = icmp ne i8 [[Y:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[Y_NZ]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %xo = or i8 %xx, 1 |
| %y_nz = icmp ne i8 %y, 0 |
| call void @llvm.assume(i1 %y_nz) |
| %x = mul i8 %xo, %y |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @mul_nonzero_odd_fail_y_maybe_zero(i8 %xx, i8 %y, i8 %ind) { |
| ; CHECK-LABEL: @mul_nonzero_odd_fail_y_maybe_zero( |
| ; CHECK-NEXT: [[XO:%.*]] = or i8 [[XX:%.*]], 1 |
| ; CHECK-NEXT: [[X:%.*]] = mul i8 [[XO]], [[Y:%.*]] |
| ; CHECK-NEXT: [[Z:%.*]] = or i8 [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Z]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %xo = or i8 %xx, 1 |
| %x = mul i8 %xo, %y |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @sshl_nonzero(i8 %xx, i8 %y, i8 %ind) { |
| ; CHECK-LABEL: @sshl_nonzero( |
| ; CHECK-NEXT: [[X_NZ:%.*]] = icmp ne i8 [[XX:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[X_NZ]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %x_nz = icmp ne i8 %xx, 0 |
| call void @llvm.assume(i1 %x_nz) |
| %x = call i8 @llvm.sshl.sat.i8(i8 %xx, i8 %y) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @sshl_nonzero_fail_x_maybe_z(i8 %xx, i8 %y, i8 %ind) { |
| ; CHECK-LABEL: @sshl_nonzero_fail_x_maybe_z( |
| ; CHECK-NEXT: [[X:%.*]] = call i8 @llvm.sshl.sat.i8(i8 [[XX:%.*]], i8 [[Y:%.*]]) |
| ; CHECK-NEXT: [[Z:%.*]] = or i8 [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Z]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %x = call i8 @llvm.sshl.sat.i8(i8 %xx, i8 %y) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @ushl_nonzero(i8 %xx, i8 %y, i8 %ind) { |
| ; CHECK-LABEL: @ushl_nonzero( |
| ; CHECK-NEXT: [[X_NZ:%.*]] = icmp ne i8 [[XX:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[X_NZ]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %x_nz = icmp ne i8 %xx, 0 |
| call void @llvm.assume(i1 %x_nz) |
| %x = call i8 @llvm.ushl.sat.i8(i8 %xx, i8 %y) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @ushl_nonzero_fail_x_maybe_z(i8 %xx, i8 %y, i8 %ind) { |
| ; CHECK-LABEL: @ushl_nonzero_fail_x_maybe_z( |
| ; CHECK-NEXT: [[X:%.*]] = call i8 @llvm.ushl.sat.i8(i8 [[XX:%.*]], i8 [[Y:%.*]]) |
| ; CHECK-NEXT: [[Z:%.*]] = or i8 [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Z]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %x = call i8 @llvm.ushl.sat.i8(i8 %xx, i8 %y) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @ssub_sat_nonzero(i8 %xx, i8 %yy, i8 %ind) { |
| ; CHECK-LABEL: @ssub_sat_nonzero( |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %xa = and i8 %xx, 191 |
| %yo = or i8 %yy, 64 |
| %x = call i8 @llvm.ssub.sat.i8(i8 %xa, i8 %yo) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @ssub_sat_nonzero_ne_known_bits_fail_overlap(i8 %xx, i8 %yy, i8 %ind) { |
| ; CHECK-LABEL: @ssub_sat_nonzero_ne_known_bits_fail_overlap( |
| ; CHECK-NEXT: [[XA:%.*]] = and i8 [[XX:%.*]], -64 |
| ; CHECK-NEXT: [[YO:%.*]] = or i8 [[YY:%.*]], 64 |
| ; CHECK-NEXT: [[X:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[XA]], i8 [[YO]]) |
| ; CHECK-NEXT: [[Z:%.*]] = or i8 [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Z]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %xa = and i8 %xx, 192 |
| %yo = or i8 %yy, 64 |
| %x = call i8 @llvm.ssub.sat.i8(i8 %xa, i8 %yo) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @usub_sat_nonzero(i8 %xx, i8 %yy, i8 %ind) { |
| ; CHECK-LABEL: @usub_sat_nonzero( |
| ; CHECK-NEXT: [[Y_ULT_31:%.*]] = icmp ult i8 [[YY:%.*]], 31 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[Y_ULT_31]]) |
| ; CHECK-NEXT: [[XO:%.*]] = or i8 [[XX:%.*]], 34 |
| ; CHECK-NEXT: [[X:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[XO]], i8 [[YY]]) |
| ; CHECK-NEXT: [[Z:%.*]] = or i8 [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Z]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %y_ult_31 = icmp ult i8 %yy, 31 |
| call void @llvm.assume(i1 %y_ult_31) |
| %xo = or i8 %xx, 34 |
| %x = call i8 @llvm.usub.sat.i8(i8 %xo, i8 %yy) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @usub_sat_nonzero_fail(i8 %xx, i8 %yy, i8 %ind) { |
| ; CHECK-LABEL: @usub_sat_nonzero_fail( |
| ; CHECK-NEXT: [[XA:%.*]] = and i8 [[XX:%.*]], 16 |
| ; CHECK-NEXT: [[YO:%.*]] = or i8 [[YY:%.*]], 7 |
| ; CHECK-NEXT: [[X:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[XA]], i8 [[YO]]) |
| ; CHECK-NEXT: [[Z:%.*]] = or i8 [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Z]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %xa = and i8 %xx, 16 |
| %yo = or i8 %yy, 7 |
| %x = call i8 @llvm.usub.sat.i8(i8 %xa, i8 %yo) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @sadd_sat_nonzero(i8 %xx, i8 %yy, i8 %ind) { |
| ; CHECK-LABEL: @sadd_sat_nonzero( |
| ; CHECK-NEXT: [[X_STRICT_POS:%.*]] = icmp sgt i8 [[XX:%.*]], 0 |
| ; CHECK-NEXT: [[Y_POS:%.*]] = icmp sge i8 [[YY:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[X_STRICT_POS]]) |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[Y_POS]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %x_strict_pos = icmp sgt i8 %xx, 0 |
| %y_pos = icmp sge i8 %yy, 0 |
| call void @llvm.assume(i1 %x_strict_pos) |
| call void @llvm.assume(i1 %y_pos) |
| %x = call i8 @llvm.sadd.sat.i8(i8 %xx, i8 %yy) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @sadd_sat_nonzero_fail_maybe_zz(i8 %xx, i8 %yy, i8 %ind) { |
| ; CHECK-LABEL: @sadd_sat_nonzero_fail_maybe_zz( |
| ; CHECK-NEXT: [[X_POS:%.*]] = icmp sge i8 [[XX:%.*]], 0 |
| ; CHECK-NEXT: [[Y_POS:%.*]] = icmp sge i8 [[YY:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[X_POS]]) |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[Y_POS]]) |
| ; CHECK-NEXT: [[X:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[XX]], i8 [[YY]]) |
| ; CHECK-NEXT: [[Z:%.*]] = or i8 [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Z]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %x_pos = icmp sge i8 %xx, 0 |
| %y_pos = icmp sge i8 %yy, 0 |
| call void @llvm.assume(i1 %x_pos) |
| call void @llvm.assume(i1 %y_pos) |
| %x = call i8 @llvm.sadd.sat.i8(i8 %xx, i8 %yy) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @umax_nonzero(i8 %xx, i8 %yy, i8 %ind) { |
| ; CHECK-LABEL: @umax_nonzero( |
| ; CHECK-NEXT: [[X_NZ:%.*]] = icmp ne i8 [[XX:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[X_NZ]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %x_nz = icmp ne i8 %xx, 0 |
| call void @llvm.assume(i1 %x_nz) |
| %x = call i8 @llvm.umax.i8(i8 %xx, i8 %yy) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @umax_nonzero_fail_x_maybe_z(i8 %xx, i8 %yy, i8 %ind) { |
| ; CHECK-LABEL: @umax_nonzero_fail_x_maybe_z( |
| ; CHECK-NEXT: [[X_NZ:%.*]] = icmp sge i8 [[XX:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[X_NZ]]) |
| ; CHECK-NEXT: [[X:%.*]] = call i8 @llvm.umax.i8(i8 [[XX]], i8 [[YY:%.*]]) |
| ; CHECK-NEXT: [[Z:%.*]] = or i8 [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Z]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %x_nz = icmp sge i8 %xx, 0 |
| call void @llvm.assume(i1 %x_nz) |
| %x = call i8 @llvm.umax.i8(i8 %xx, i8 %yy) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @umin_nonzero(i8 %xx, i8 %yy, i8 %ind) { |
| ; CHECK-LABEL: @umin_nonzero( |
| ; CHECK-NEXT: [[X_NZ:%.*]] = icmp ne i8 [[XX:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[X_NZ]]) |
| ; CHECK-NEXT: [[Y_NZ:%.*]] = icmp ne i8 [[YY:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[Y_NZ]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %x_nz = icmp ne i8 %xx, 0 |
| call void @llvm.assume(i1 %x_nz) |
| %y_nz = icmp ne i8 %yy, 0 |
| call void @llvm.assume(i1 %y_nz) |
| %x = call i8 @llvm.umin.i8(i8 %xx, i8 %yy) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @umin_nonzero_fail_y_maybe_z(i8 %xx, i8 %yy, i8 %ind) { |
| ; CHECK-LABEL: @umin_nonzero_fail_y_maybe_z( |
| ; CHECK-NEXT: [[X_NZ:%.*]] = icmp ne i8 [[XX:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[X_NZ]]) |
| ; CHECK-NEXT: [[X:%.*]] = call i8 @llvm.umin.i8(i8 [[XX]], i8 [[YY:%.*]]) |
| ; CHECK-NEXT: [[Z:%.*]] = or i8 [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Z]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %x_nz = icmp ne i8 %xx, 0 |
| call void @llvm.assume(i1 %x_nz) |
| %x = call i8 @llvm.umin.i8(i8 %xx, i8 %yy) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @smin_nonzero(i8 %xx, i8 %yy, i8 %ind) { |
| ; CHECK-LABEL: @smin_nonzero( |
| ; CHECK-NEXT: [[X_NZ:%.*]] = icmp ne i8 [[XX:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[X_NZ]]) |
| ; CHECK-NEXT: [[Y_NZ:%.*]] = icmp ne i8 [[YY:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[Y_NZ]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %x_nz = icmp ne i8 %xx, 0 |
| call void @llvm.assume(i1 %x_nz) |
| %y_nz = icmp ne i8 %yy, 0 |
| call void @llvm.assume(i1 %y_nz) |
| %x = call i8 @llvm.smin.i8(i8 %xx, i8 %yy) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @smin_nonzero_neg_arg(i8 %xx, i8 %yy, i8 %ind) { |
| ; CHECK-LABEL: @smin_nonzero_neg_arg( |
| ; CHECK-NEXT: [[X_NEG:%.*]] = icmp slt i8 [[XX:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[X_NEG]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %x_neg = icmp slt i8 %xx, 0 |
| call void @llvm.assume(i1 %x_neg) |
| %x = call i8 @llvm.smin.i8(i8 %xx, i8 %yy) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @smin_nonzero_fail_y_maybe_z(i8 %xx, i8 %yy, i8 %ind) { |
| ; CHECK-LABEL: @smin_nonzero_fail_y_maybe_z( |
| ; CHECK-NEXT: [[X_NZ:%.*]] = icmp sle i8 [[XX:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[X_NZ]]) |
| ; CHECK-NEXT: [[X:%.*]] = call i8 @llvm.smin.i8(i8 [[XX]], i8 [[YY:%.*]]) |
| ; CHECK-NEXT: [[Z:%.*]] = or i8 [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Z]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %x_nz = icmp sle i8 %xx, 0 |
| call void @llvm.assume(i1 %x_nz) |
| %x = call i8 @llvm.smin.i8(i8 %xx, i8 %yy) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @smax_nonzero_pos_arg(i8 %xx, i8 %yy, i8 %ind) { |
| ; CHECK-LABEL: @smax_nonzero_pos_arg( |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %ya = and i8 %yy, 127 |
| %yo = or i8 %ya, 1 |
| %x = call i8 @llvm.smax.i8(i8 %xx, i8 %yo) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @smax_nonzero_pos_arg_fail_nonstrict_pos(i8 %xx, i8 %yy, i8 %ind) { |
| ; CHECK-LABEL: @smax_nonzero_pos_arg_fail_nonstrict_pos( |
| ; CHECK-NEXT: [[Y_POS:%.*]] = icmp sge i8 [[YY:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[Y_POS]]) |
| ; CHECK-NEXT: [[X:%.*]] = call i8 @llvm.smax.i8(i8 [[XX:%.*]], i8 [[YY]]) |
| ; CHECK-NEXT: [[Z:%.*]] = or i8 [[X]], [[IND:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Z]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %y_pos = icmp sge i8 %yy, 0 |
| call void @llvm.assume(i1 %y_pos) |
| %x = call i8 @llvm.smax.i8(i8 %xx, i8 %yy) |
| %z = or i8 %x, %ind |
| %r = icmp eq i8 %z, 0 |
| ret i1 %r |
| } |
| |
| define i1 @mul_nonzero_contains_nonzero_mul(i8 %x, i8 %y) { |
| ; CHECK-LABEL: @mul_nonzero_contains_nonzero_mul( |
| ; CHECK-NEXT: ret i1 true |
| ; |
| %xx = or i8 %x, 16 |
| %yy = or i8 %y, 8 |
| %xy = mul i8 %xx, %yy |
| %nz = icmp ne i8 %xy, 0 |
| ret i1 %nz |
| } |
| |
| define i1 @src_mul_maybe_zero_no_nonzero_mul(i8 %x, i8 %y) { |
| ; CHECK-LABEL: @src_mul_maybe_zero_no_nonzero_mul( |
| ; CHECK-NEXT: [[XX:%.*]] = or i8 [[X:%.*]], 96 |
| ; CHECK-NEXT: [[YY:%.*]] = or i8 [[Y:%.*]], 8 |
| ; CHECK-NEXT: [[XY:%.*]] = mul i8 [[XX]], [[YY]] |
| ; CHECK-NEXT: [[NZ:%.*]] = icmp ne i8 [[XY]], 0 |
| ; CHECK-NEXT: ret i1 [[NZ]] |
| ; |
| %xx = or i8 %x, 96 |
| %yy = or i8 %y, 8 |
| %xy = mul i8 %xx, %yy |
| %nz = icmp ne i8 %xy, 0 |
| ret i1 %nz |
| } |
| |
| define i1 @sdiv_known_non_zero(i8 %x, i8 %y) { |
| ; CHECK-LABEL: @sdiv_known_non_zero( |
| ; CHECK-NEXT: ret i1 true |
| ; |
| %xx0 = or i8 %x, 135 |
| %xx = and i8 %xx0, -2 |
| %xy = sdiv i8 %xx, -2 |
| %nz = icmp ne i8 %xy, 0 |
| ret i1 %nz |
| } |
| |
| define i1 @sdiv_known_non_zero2(i8 %x, i8 %y) { |
| ; CHECK-LABEL: @sdiv_known_non_zero2( |
| ; CHECK-NEXT: ret i1 true |
| ; |
| %xx0 = or i8 %x, 15 |
| %xx = and i8 %xx0, -4 |
| %yy = and i8 %y, 3 |
| %xy = sdiv i8 %xx, %yy |
| %nz = icmp ne i8 %xy, 0 |
| ret i1 %nz |
| } |
| |
| define i1 @sdiv_known_non_zero_fail(i8 %x, i8 %y) { |
| ; CHECK-LABEL: @sdiv_known_non_zero_fail( |
| ; CHECK-NEXT: [[XX:%.*]] = or i8 [[X:%.*]], 15 |
| ; CHECK-NEXT: [[YY:%.*]] = and i8 [[Y:%.*]], 3 |
| ; CHECK-NEXT: [[XY:%.*]] = sdiv i8 [[XX]], [[YY]] |
| ; CHECK-NEXT: [[NZ:%.*]] = icmp ne i8 [[XY]], 0 |
| ; CHECK-NEXT: ret i1 [[NZ]] |
| ; |
| %xx = or i8 %x, 15 |
| %yy = and i8 %y, 3 |
| %xy = sdiv i8 %xx, %yy |
| %nz = icmp ne i8 %xy, 0 |
| ret i1 %nz |
| } |
| |
| define <2 x i1> @cmp_excludes_zero_with_nonsplat_vec(<2 x i8> %a, <2 x i8> %b) { |
| ; CHECK-LABEL: @cmp_excludes_zero_with_nonsplat_vec( |
| ; CHECK-NEXT: ret <2 x i1> zeroinitializer |
| ; |
| %c = icmp sge <2 x i8> %a, <i8 1, i8 4> |
| %s = select <2 x i1> %c, <2 x i8> %a, <2 x i8> <i8 4, i8 5> |
| %and = or <2 x i8> %s, %b |
| %r = icmp eq <2 x i8> %and, zeroinitializer |
| ret <2 x i1> %r |
| } |
| |
| define <2 x i1> @cmp_excludes_zero_with_nonsplat_vec_wundef(<2 x i8> %a, <2 x i8> %b) { |
| ; CHECK-LABEL: @cmp_excludes_zero_with_nonsplat_vec_wundef( |
| ; CHECK-NEXT: [[C:%.*]] = icmp sge <2 x i8> [[A:%.*]], <i8 1, i8 undef> |
| ; CHECK-NEXT: [[S:%.*]] = select <2 x i1> [[C]], <2 x i8> [[A]], <2 x i8> <i8 4, i8 5> |
| ; CHECK-NEXT: [[AND:%.*]] = or <2 x i8> [[S]], [[B:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i8> [[AND]], zeroinitializer |
| ; CHECK-NEXT: ret <2 x i1> [[R]] |
| ; |
| %c = icmp sge <2 x i8> %a, <i8 1, i8 undef> |
| %s = select <2 x i1> %c, <2 x i8> %a, <2 x i8> <i8 4, i8 5> |
| %and = or <2 x i8> %s, %b |
| %r = icmp eq <2 x i8> %and, zeroinitializer |
| ret <2 x i1> %r |
| } |
| |
| define <2 x i1> @cmp_excludes_zero_with_nonsplat_vec_wpoison(<2 x i8> %a, <2 x i8> %b) { |
| ; CHECK-LABEL: @cmp_excludes_zero_with_nonsplat_vec_wpoison( |
| ; CHECK-NEXT: [[C:%.*]] = icmp sge <2 x i8> [[A:%.*]], <i8 1, i8 poison> |
| ; CHECK-NEXT: [[S:%.*]] = select <2 x i1> [[C]], <2 x i8> [[A]], <2 x i8> <i8 4, i8 5> |
| ; CHECK-NEXT: [[AND:%.*]] = or <2 x i8> [[S]], [[B:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i8> [[AND]], zeroinitializer |
| ; CHECK-NEXT: ret <2 x i1> [[R]] |
| ; |
| %c = icmp sge <2 x i8> %a, <i8 1, i8 poison> |
| %s = select <2 x i1> %c, <2 x i8> %a, <2 x i8> <i8 4, i8 5> |
| %and = or <2 x i8> %s, %b |
| %r = icmp eq <2 x i8> %and, zeroinitializer |
| ret <2 x i1> %r |
| } |
| |
| define <2 x i1> @cmp_excludes_zero_with_nonsplat_vec_fail(<2 x i8> %a, <2 x i8> %b) { |
| ; CHECK-LABEL: @cmp_excludes_zero_with_nonsplat_vec_fail( |
| ; CHECK-NEXT: [[C:%.*]] = icmp sge <2 x i8> [[A:%.*]], <i8 0, i8 4> |
| ; CHECK-NEXT: [[S:%.*]] = select <2 x i1> [[C]], <2 x i8> [[A]], <2 x i8> <i8 4, i8 5> |
| ; CHECK-NEXT: [[AND:%.*]] = or <2 x i8> [[S]], [[B:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i8> [[AND]], zeroinitializer |
| ; CHECK-NEXT: ret <2 x i1> [[R]] |
| ; |
| %c = icmp sge <2 x i8> %a, <i8 0, i8 4> |
| %s = select <2 x i1> %c, <2 x i8> %a, <2 x i8> <i8 4, i8 5> |
| %and = or <2 x i8> %s, %b |
| %r = icmp eq <2 x i8> %and, zeroinitializer |
| ret <2 x i1> %r |
| } |
| |
| define i1 @sub_via_non_eq(i8 %x, i8 %y) { |
| ; CHECK-LABEL: @sub_via_non_eq( |
| ; CHECK-NEXT: [[NE:%.*]] = icmp ne i8 [[X:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[NE]]) |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %ne = icmp ne i8 %x, 0 |
| call void @llvm.assume(i1 %ne) |
| %shl = shl nuw i8 %x, 3 |
| %sub = sub i8 %x, %shl |
| %cmp = icmp eq i8 %sub, 0 |
| ret i1 %cmp |
| } |
| |
| ; Test a mismatch between the ptrtoint result type and the pointer size. |
| define i1 @recursiveGEP_orcmp_truncPtr(ptr %val1, i32 %val2) { |
| ; CHECK-LABEL: @recursiveGEP_orcmp_truncPtr( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: br label [[WHILE_COND_I:%.*]] |
| ; CHECK: while.cond.i: |
| ; CHECK-NEXT: [[A_PN_I:%.*]] = phi ptr [ [[TEST_0_I:%.*]], [[WHILE_COND_I]] ], [ [[VAL1:%.*]], [[ENTRY:%.*]] ] |
| ; CHECK-NEXT: [[TEST_0_I]] = getelementptr inbounds i8, ptr [[A_PN_I]], i64 1 |
| ; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[TEST_0_I]], align 2 |
| ; CHECK-NEXT: [[CMP3_NOT_I:%.*]] = icmp eq i8 [[TMP0]], 0 |
| ; CHECK-NEXT: br i1 [[CMP3_NOT_I]], label [[WHILE_END_I:%.*]], label [[WHILE_COND_I]] |
| ; CHECK: while.end.i: |
| ; CHECK-NEXT: [[SUB_PTR_LHS_CAST_I:%.*]] = ptrtoint ptr [[TEST_0_I]] to i32 |
| ; CHECK-NEXT: [[SUB_PTR_RHS_CAST_I:%.*]] = ptrtoint ptr [[VAL1]] to i32 |
| ; CHECK-NEXT: [[SUB_PTR_SUB_I:%.*]] = sub i32 [[SUB_PTR_LHS_CAST_I]], [[SUB_PTR_RHS_CAST_I]] |
| ; CHECK-NEXT: [[ORVAL:%.*]] = or i32 [[VAL2:%.*]], [[SUB_PTR_SUB_I]] |
| ; CHECK-NEXT: [[BOOL:%.*]] = icmp eq i32 [[ORVAL]], 0 |
| ; CHECK-NEXT: ret i1 [[BOOL]] |
| ; |
| entry: |
| br label %while.cond.i |
| |
| while.cond.i: |
| %a.pn.i = phi ptr [ %test.0.i, %while.cond.i ], [ %val1, %entry ] |
| %test.0.i = getelementptr inbounds i8, ptr %a.pn.i, i64 1 |
| %0 = load i8, ptr %test.0.i, align 2 |
| %cmp3.not.i = icmp eq i8 %0, 0 |
| br i1 %cmp3.not.i, label %while.end.i, label %while.cond.i |
| |
| while.end.i: |
| %sub.ptr.lhs.cast.i = ptrtoint ptr %test.0.i to i32 |
| %sub.ptr.rhs.cast.i = ptrtoint ptr %val1 to i32 |
| %sub.ptr.sub.i = sub i32 %sub.ptr.lhs.cast.i, %sub.ptr.rhs.cast.i |
| %orval = or i32 %val2, %sub.ptr.sub.i |
| %bool = icmp eq i32 %orval, 0 |
| ret i1 %bool |
| } |
| |
| define i1 @check_get_vector_length(i32 %x, i32 %y) { |
| ; CHECK-LABEL: @check_get_vector_length( |
| ; CHECK-NEXT: [[NE:%.*]] = icmp ne i32 [[X:%.*]], 0 |
| ; CHECK-NEXT: br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]] |
| ; CHECK: true: |
| ; CHECK-NEXT: [[Z:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[X]], i32 1, i1 true) |
| ; CHECK-NEXT: [[CMP0:%.*]] = icmp ugt i32 [[Z]], [[Y:%.*]] |
| ; CHECK-NEXT: ret i1 [[CMP0]] |
| ; CHECK: false: |
| ; CHECK-NEXT: ret i1 [[NE]] |
| ; |
| %ne = icmp ne i32 %x, 0 |
| br i1 %ne, label %true, label %false |
| true: |
| %z = call i32 @llvm.experimental.get.vector.length.i32(i32 %x, i32 1, i1 true) |
| %cmp0 = icmp ugt i32 %z, %y |
| %cmp1 = icmp eq i32 %y, 0 |
| %r = or i1 %cmp0, %cmp1 |
| ret i1 %r |
| false: |
| ret i1 %ne |
| } |
| |
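| ;; Range information (!range load metadata, range(...) attributes on arguments, |
| ;; returns, and call sites) that excludes zero proves the value non-zero. |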
| define <2 x i1> @range_metadata_vec(ptr %p, <2 x i32> %x) { |
| ; CHECK-LABEL: @range_metadata_vec( |
| ; CHECK-NEXT: ret <2 x i1> splat (i1 true) |
| ; |
| %v = load <2 x i32>, ptr %p, !range !{i32 1, i32 100} |
| %or = or <2 x i32> %v, %x |
| %cmp = icmp ne <2 x i32> %or, zeroinitializer |
| ret <2 x i1> %cmp |
| } |
| |
| define i1 @range_attr(i8 range(i8 1, 0) %x, i8 %y) { |
| ; CHECK-LABEL: @range_attr( |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %or = or i8 %y, %x |
| %cmp = icmp eq i8 %or, 0 |
| ret i1 %cmp |
| } |
| |
| define i1 @neg_range_attr(i8 range(i8 -1, 1) %x, i8 %y) { |
| ; CHECK-LABEL: @neg_range_attr( |
| ; CHECK-NEXT: [[OR:%.*]] = or i8 [[Y:%.*]], [[X:%.*]] |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[OR]], 0 |
| ; CHECK-NEXT: ret i1 [[CMP]] |
| ; |
| %or = or i8 %y, %x |
| %cmp = icmp eq i8 %or, 0 |
| ret i1 %cmp |
| } |
| |
| declare range(i8 1, 0) i8 @returns_non_zero_range_helper() |
| declare range(i8 -1, 1) i8 @returns_contain_zero_range_helper() |
| |
| define i1 @range_return(i8 %y) { |
| ; CHECK-LABEL: @range_return( |
| ; CHECK-NEXT: [[X:%.*]] = call i8 @returns_non_zero_range_helper() |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %x = call i8 @returns_non_zero_range_helper() |
| %or = or i8 %y, %x |
| %cmp = icmp eq i8 %or, 0 |
| ret i1 %cmp |
| } |
| |
| define i1 @neg_range_return(i8 %y) { |
| ; CHECK-LABEL: @neg_range_return( |
| ; CHECK-NEXT: [[X:%.*]] = call i8 @returns_contain_zero_range_helper() |
| ; CHECK-NEXT: [[OR:%.*]] = or i8 [[Y:%.*]], [[X]] |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[OR]], 0 |
| ; CHECK-NEXT: ret i1 [[CMP]] |
| ; |
| %x = call i8 @returns_contain_zero_range_helper() |
| %or = or i8 %y, %x |
| %cmp = icmp eq i8 %or, 0 |
| ret i1 %cmp |
| } |
| |
| declare i8 @returns_i8_helper() |
| |
| define i1 @range_call(i8 %y) { |
| ; CHECK-LABEL: @range_call( |
| ; CHECK-NEXT: [[X:%.*]] = call range(i8 1, 0) i8 @returns_i8_helper() |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %x = call range(i8 1, 0) i8 @returns_i8_helper() |
| %or = or i8 %y, %x |
| %cmp = icmp eq i8 %or, 0 |
| ret i1 %cmp |
| } |
| |
| define i1 @neg_range_call(i8 %y) { |
| ; CHECK-LABEL: @neg_range_call( |
| ; CHECK-NEXT: [[X:%.*]] = call range(i8 -1, 1) i8 @returns_i8_helper() |
| ; CHECK-NEXT: [[OR:%.*]] = or i8 [[Y:%.*]], [[X]] |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[OR]], 0 |
| ; CHECK-NEXT: ret i1 [[CMP]] |
| ; |
| %x = call range(i8 -1, 1) i8 @returns_i8_helper() |
| %or = or i8 %y, %x |
| %cmp = icmp eq i8 %or, 0 |
| ret i1 %cmp |
| } |
| |
| define <2 x i1> @range_attr_vec(<2 x i8> range(i8 1, 0) %x, <2 x i8> %y) { |
| ; CHECK-LABEL: @range_attr_vec( |
| ; CHECK-NEXT: ret <2 x i1> splat (i1 true) |
| ; |
| %or = or <2 x i8> %y, %x |
| %cmp = icmp ne <2 x i8> %or, zeroinitializer |
| ret <2 x i1> %cmp |
| } |
| |
| define <2 x i1> @neg_range_attr_vec(<2 x i8> range(i8 -1, 1) %x, <2 x i8> %y) { |
| ; CHECK-LABEL: @neg_range_attr_vec( |
| ; CHECK-NEXT: [[OR:%.*]] = or <2 x i8> [[Y:%.*]], [[X:%.*]] |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i8> [[OR]], zeroinitializer |
| ; CHECK-NEXT: ret <2 x i1> [[CMP]] |
| ; |
| %or = or <2 x i8> %y, %x |
| %cmp = icmp ne <2 x i8> %or, zeroinitializer |
| ret <2 x i1> %cmp |
| } |
| |
| declare range(i8 1, 0) <2 x i8> @returns_non_zero_range_helper_vec() |
| declare range(i8 -1, 1) <2 x i8> @returns_contain_zero_range_helper_vec() |
| |
| define <2 x i1> @range_return_vec(<2 x i8> %y) { |
| ; CHECK-LABEL: @range_return_vec( |
| ; CHECK-NEXT: [[X:%.*]] = call <2 x i8> @returns_non_zero_range_helper_vec() |
| ; CHECK-NEXT: ret <2 x i1> splat (i1 true) |
| ; |
| %x = call <2 x i8> @returns_non_zero_range_helper_vec() |
| %or = or <2 x i8> %y, %x |
| %cmp = icmp ne <2 x i8> %or, zeroinitializer |
| ret <2 x i1> %cmp |
| } |
| |
| define <2 x i1> @neg_range_return_vec(<2 x i8> %y) { |
| ; CHECK-LABEL: @neg_range_return_vec( |
| ; CHECK-NEXT: [[X:%.*]] = call <2 x i8> @returns_contain_zero_range_helper_vec() |
| ; CHECK-NEXT: [[OR:%.*]] = or <2 x i8> [[Y:%.*]], [[X]] |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i8> [[OR]], zeroinitializer |
| ; CHECK-NEXT: ret <2 x i1> [[CMP]] |
| ; |
| %x = call <2 x i8> @returns_contain_zero_range_helper_vec() |
| %or = or <2 x i8> %y, %x |
| %cmp = icmp ne <2 x i8> %or, zeroinitializer |
| ret <2 x i1> %cmp |
| } |
| |
| declare <2 x i8> @returns_i8_helper_vec() |
| |
| define <2 x i1> @range_call_vec(<2 x i8> %y) { |
| ; CHECK-LABEL: @range_call_vec( |
| ; CHECK-NEXT: [[X:%.*]] = call range(i8 1, 0) <2 x i8> @returns_i8_helper_vec() |
| ; CHECK-NEXT: ret <2 x i1> splat (i1 true) |
| ; |
| %x = call range(i8 1, 0) <2 x i8> @returns_i8_helper_vec() |
| %or = or <2 x i8> %y, %x |
| %cmp = icmp ne <2 x i8> %or, zeroinitializer |
| ret <2 x i1> %cmp |
| } |
| |
| define <2 x i1> @neg_range_call_vec(<2 x i8> %y) { |
| ; CHECK-LABEL: @neg_range_call_vec( |
| ; CHECK-NEXT: [[X:%.*]] = call range(i8 -1, 1) <2 x i8> @returns_i8_helper_vec() |
| ; CHECK-NEXT: [[OR:%.*]] = or <2 x i8> [[Y:%.*]], [[X]] |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i8> [[OR]], zeroinitializer |
| ; CHECK-NEXT: ret <2 x i1> [[CMP]] |
| ; |
| %x = call range(i8 -1, 1) <2 x i8> @returns_i8_helper_vec() |
| %or = or <2 x i8> %y, %x |
| %cmp = icmp ne <2 x i8> %or, zeroinitializer |
| ret <2 x i1> %cmp |
| } |
| |
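| ;; trunc nuw/nsw of a non-zero value is non-zero: a zero result would require |
| ;; dropping significant bits, which the flag turns into poison. |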
| define i1 @trunc_nsw_non_zero(i8 %x) { |
| ; CHECK-LABEL: @trunc_nsw_non_zero( |
| ; CHECK-NEXT: [[X_NE_Z:%.*]] = icmp ne i8 [[X:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[X_NE_Z]]) |
| ; CHECK-NEXT: ret i1 true |
| ; |
| %x_ne_z = icmp ne i8 %x, 0 |
| call void @llvm.assume(i1 %x_ne_z) |
| %v = trunc nsw i8 %x to i4 |
| %r = icmp ne i4 %v, 0 |
| ret i1 %r |
| } |
| |
| define i1 @trunc_nuw_non_zero(i8 %xx) { |
| ; CHECK-LABEL: @trunc_nuw_non_zero( |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %x = add nuw i8 %xx, 1 |
| %v = trunc nuw i8 %x to i4 |
| %r = icmp eq i4 %v, 0 |
| ret i1 %r |
| } |
| |
| define i1 @trunc_non_zero_fail(i8 %x) { |
| ; CHECK-LABEL: @trunc_non_zero_fail( |
| ; CHECK-NEXT: [[X_NE_Z:%.*]] = icmp ne i8 [[X:%.*]], 0 |
| ; CHECK-NEXT: call void @llvm.assume(i1 [[X_NE_Z]]) |
| ; CHECK-NEXT: [[R:%.*]] = trunc i8 [[X]] to i1 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %x_ne_z = icmp ne i8 %x, 0 |
| call void @llvm.assume(i1 %x_ne_z) |
| %r = trunc i8 %x to i1 |
| ret i1 %r |
| } |
| |
| define i1 @trunc_nsw_nuw_non_zero_fail(i8 %xx) { |
| ; CHECK-LABEL: @trunc_nsw_nuw_non_zero_fail( |
| ; CHECK-NEXT: [[X:%.*]] = add nsw i8 [[XX:%.*]], 1 |
| ; CHECK-NEXT: [[V:%.*]] = trunc nuw nsw i8 [[X]] to i4 |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i4 [[V]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %x = add nsw i8 %xx, 1 |
| %v = trunc nsw nuw i8 %x to i4 |
| %r = icmp eq i4 %v, 0 |
| ret i1 %r |
| } |
| |
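| ;; llvm.vector.reverse only permutes lanes, so per-lane non-zero facts carry |
| ;; over to the corresponding (demanded) result lanes. |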
| define <4 x i1> @vec_reverse_non_zero(<4 x i8> %xx) { |
| ; CHECK-LABEL: @vec_reverse_non_zero( |
| ; CHECK-NEXT: ret <4 x i1> zeroinitializer |
| ; |
| %x = add nuw <4 x i8> %xx, <i8 1, i8 1, i8 1, i8 1> |
| %rev = call <4 x i8> @llvm.vector.reverse(<4 x i8> %x) |
| %r = icmp eq <4 x i8> %rev, zeroinitializer |
| ret <4 x i1> %r |
| } |
| |
| define <4 x i1> @vec_reverse_non_zero_fail(<4 x i8> %xx) { |
| ; CHECK-LABEL: @vec_reverse_non_zero_fail( |
| ; CHECK-NEXT: [[X:%.*]] = add nuw <4 x i8> [[XX:%.*]], <i8 1, i8 0, i8 1, i8 1> |
| ; CHECK-NEXT: [[REV:%.*]] = call <4 x i8> @llvm.vector.reverse.v4i8(<4 x i8> [[X]]) |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq <4 x i8> [[REV]], zeroinitializer |
| ; CHECK-NEXT: ret <4 x i1> [[R]] |
| ; |
| %x = add nuw <4 x i8> %xx, <i8 1, i8 0, i8 1, i8 1> |
| %rev = call <4 x i8> @llvm.vector.reverse(<4 x i8> %x) |
| %r = icmp eq <4 x i8> %rev, zeroinitializer |
| ret <4 x i1> %r |
| } |
| |
| define i1 @vec_reverse_non_zero_demanded(<4 x i8> %xx) { |
| ; CHECK-LABEL: @vec_reverse_non_zero_demanded( |
| ; CHECK-NEXT: ret i1 false |
| ; |
| %x = add nuw <4 x i8> %xx, <i8 1, i8 0, i8 0, i8 0> |
| %rev = call <4 x i8> @llvm.vector.reverse(<4 x i8> %x) |
| %ele = extractelement <4 x i8> %rev, i64 3 |
| %r = icmp eq i8 %ele, 0 |
| ret i1 %r |
| } |
| |
| define i1 @vec_reverse_non_zero_demanded_fail(<4 x i8> %xx) { |
| ; CHECK-LABEL: @vec_reverse_non_zero_demanded_fail( |
| ; CHECK-NEXT: [[X:%.*]] = add nuw <4 x i8> [[XX:%.*]], <i8 1, i8 0, i8 0, i8 0> |
| ; CHECK-NEXT: [[REV:%.*]] = call <4 x i8> @llvm.vector.reverse.v4i8(<4 x i8> [[X]]) |
| ; CHECK-NEXT: [[ELE:%.*]] = extractelement <4 x i8> [[REV]], i64 2 |
| ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[ELE]], 0 |
| ; CHECK-NEXT: ret i1 [[R]] |
| ; |
| %x = add nuw <4 x i8> %xx, <i8 1, i8 0, i8 0, i8 0> |
| %rev = call <4 x i8> @llvm.vector.reverse(<4 x i8> %x) |
| %ele = extractelement <4 x i8> %rev, i64 2 |
| %r = icmp eq i8 %ele, 0 |
| ret i1 %r |
| } |
| |
| declare i32 @llvm.experimental.get.vector.length.i32(i32, i32, i1) |