; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -passes=indvars < %s | FileCheck %s
; A [0, block_size) counted loop with an in-loop guard (i > 2 -> @llvm.trap).
; The autogenerated CHECK lines show indvars hoisting the guard into the
; preheader as a loop-invariant condition (umin(block_size - 1, 3) == 3),
; removing the per-iteration compare. The trap callee is declared with
; attributes #0 (noreturn, memory(inaccessiblemem: write)), which presumably
; is what permits the rewrite here — compare the no_optimize_* tests below.
define void @optimize_trap(i32 %block_size) {
; CHECK-LABEL: define void @optimize_trap(
; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]])
; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0
; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]]
; CHECK: [[FOR_BODY_PREHEADER]]:
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[BLOCK_SIZE]], -1
; CHECK-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP1]], i32 3)
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 3, [[UMIN]]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]:
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]])
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br i1 [[TMP2]], label %[[IF_THEN:.*]], label %[[IF_END4]]
; CHECK: [[IF_THEN]]:
; CHECK-NEXT: call void @llvm.trap()
; CHECK-NEXT: unreachable
; CHECK: [[IF_END4]]:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP4:%.*]] = xor i8 [[TMP3]], 54
; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: store i8 [[TMP4]], ptr [[ARRAYIDX7]], align 1
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]]
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]]
;
entry:
%foo_arr = alloca [2 x i8], align 16
%bar_arr = alloca [2 x i8], align 16
call void @x(ptr nonnull %foo_arr)
%cmp14.not = icmp eq i32 %block_size, 0
br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader
for.body.preheader: ; preds = %entry
br label %for.body
for.cond.cleanup.loopexit: ; preds = %if.end4
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
call void @x(ptr nonnull %bar_arr)
ret void
for.body: ; preds = %for.body.preheader, %if.end4
%i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ]
%cmp1 = icmp samesign ugt i32 %i.015, 2
br i1 %cmp1, label %if.then, label %if.end4
if.then: ; preds = %for.body
call void @llvm.trap()
unreachable
if.end4: ; preds = %for.body
%arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015
%0 = load i8, ptr %arrayidx, align 1
%1 = xor i8 %0, 54
%arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015
store i8 %1, ptr %arrayidx7, align 1
%inc = add nuw nsw i32 %i.015, 1
%cmp = icmp ult i32 %inc, %block_size
br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}
; Same loop shape as @optimize_trap, but the store into %bar_arr is
; "store atomic ... unordered". The CHECK lines show the guard compare
; (icmp samesign ugt ... 2) stays inside the loop and no preheader umin
; is created, i.e. indvars does NOT rewrite the trap branch here.
define void @no_optimize_atomic(i32 %block_size) {
; CHECK-LABEL: define void @no_optimize_atomic(
; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]])
; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0
; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]]
; CHECK: [[FOR_BODY_PREHEADER]]:
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]:
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]])
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[CMP1:%.*]] = icmp samesign ugt i32 [[I_015]], 2
; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[IF_END4]]
; CHECK: [[IF_THEN]]:
; CHECK-NEXT: call void @llvm.trap()
; CHECK-NEXT: unreachable
; CHECK: [[IF_END4]]:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP4:%.*]] = xor i8 [[TMP3]], 54
; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: store atomic i8 [[TMP4]], ptr [[ARRAYIDX7]] unordered, align 1
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]]
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]]
;
entry:
%foo_arr = alloca [2 x i8], align 16
%bar_arr = alloca [2 x i8], align 16
call void @x(ptr nonnull %foo_arr)
%cmp14.not = icmp eq i32 %block_size, 0
br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader
for.body.preheader: ; preds = %entry
br label %for.body
for.cond.cleanup.loopexit: ; preds = %if.end4
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
call void @x(ptr nonnull %bar_arr)
ret void
for.body: ; preds = %for.body.preheader, %if.end4
%i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ]
%cmp1 = icmp samesign ugt i32 %i.015, 2
br i1 %cmp1, label %if.then, label %if.end4
if.then: ; preds = %for.body
call void @llvm.trap()
unreachable
if.end4: ; preds = %for.body
%arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015
%0 = load i8, ptr %arrayidx, align 1
%1 = xor i8 %0, 54
%arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015
store atomic i8 %1, ptr %arrayidx7 unordered, align 1
%inc = add nuw nsw i32 %i.015, 1
%cmp = icmp ult i32 %inc, %block_size
br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}
; Same loop shape as @optimize_trap, but the store into %bar_arr is
; volatile. The CHECK lines show the guard compare stays in-loop and no
; preheader umin is created — indvars does NOT rewrite the trap branch.
define void @no_optimize_volatile(i32 %block_size) {
; CHECK-LABEL: define void @no_optimize_volatile(
; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]])
; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0
; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]]
; CHECK: [[FOR_BODY_PREHEADER]]:
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]:
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]])
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[CMP1:%.*]] = icmp samesign ugt i32 [[I_015]], 2
; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[IF_END4]]
; CHECK: [[IF_THEN]]:
; CHECK-NEXT: call void @llvm.trap()
; CHECK-NEXT: unreachable
; CHECK: [[IF_END4]]:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP4:%.*]] = xor i8 [[TMP3]], 54
; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: store volatile i8 [[TMP4]], ptr [[ARRAYIDX7]], align 1
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]]
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]]
;
entry:
%foo_arr = alloca [2 x i8], align 16
%bar_arr = alloca [2 x i8], align 16
call void @x(ptr nonnull %foo_arr)
%cmp14.not = icmp eq i32 %block_size, 0
br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader
for.body.preheader: ; preds = %entry
br label %for.body
for.cond.cleanup.loopexit: ; preds = %if.end4
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
call void @x(ptr nonnull %bar_arr)
ret void
for.body: ; preds = %for.body.preheader, %if.end4
%i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ]
%cmp1 = icmp samesign ugt i32 %i.015, 2
br i1 %cmp1, label %if.then, label %if.end4
if.then: ; preds = %for.body
call void @llvm.trap()
unreachable
if.end4: ; preds = %for.body
%arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015
%0 = load i8, ptr %arrayidx, align 1
%1 = xor i8 %0, 54
%arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015
store volatile i8 %1, ptr %arrayidx7, align 1
%inc = add nuw nsw i32 %i.015, 1
%cmp = icmp ult i32 %inc, %block_size
br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}
; Variant with an arbitrary call (@x(ptr null)) inside the loop body in
; addition to the volatile store. The CHECK lines show the guard compare
; stays in-loop — indvars does NOT rewrite the trap branch.
define void @no_optimize_call(i32 %block_size) {
; CHECK-LABEL: define void @no_optimize_call(
; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]])
; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0
; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]]
; CHECK: [[FOR_BODY_PREHEADER]]:
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]:
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]])
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[CMP1:%.*]] = icmp samesign ugt i32 [[I_015]], 2
; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[IF_END4]]
; CHECK: [[IF_THEN]]:
; CHECK-NEXT: call void @llvm.trap()
; CHECK-NEXT: unreachable
; CHECK: [[IF_END4]]:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP4:%.*]] = xor i8 [[TMP3]], 54
; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: call void @x(ptr null)
; CHECK-NEXT: store volatile i8 [[TMP4]], ptr [[ARRAYIDX7]], align 1
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]]
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]]
;
entry:
%foo_arr = alloca [2 x i8], align 16
%bar_arr = alloca [2 x i8], align 16
call void @x(ptr nonnull %foo_arr)
%cmp14.not = icmp eq i32 %block_size, 0
br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader
for.body.preheader: ; preds = %entry
br label %for.body
for.cond.cleanup.loopexit: ; preds = %if.end4
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
call void @x(ptr nonnull %bar_arr)
ret void
for.body: ; preds = %for.body.preheader, %if.end4
%i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ]
%cmp1 = icmp samesign ugt i32 %i.015, 2
br i1 %cmp1, label %if.then, label %if.end4
if.then: ; preds = %for.body
call void @llvm.trap()
unreachable
if.end4: ; preds = %for.body
%arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015
%0 = load i8, ptr %arrayidx, align 1
%1 = xor i8 %0, 54
%arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015
call void @x(ptr null)
store volatile i8 %1, ptr %arrayidx7, align 1
%inc = add nuw nsw i32 %i.015, 1
%cmp = icmp ult i32 %inc, %block_size
br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}
; Same as @optimize_trap but the guard calls @llvm.ubsantrap(i8 1) instead
; of @llvm.trap(). The CHECK lines show the same preheader rewrite
; (umin(block_size - 1, 3) == 3) — the optimization also fires for ubsantrap.
define void @optimize_ubsan_trap(i32 %block_size) {
; CHECK-LABEL: define void @optimize_ubsan_trap(
; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]])
; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0
; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]]
; CHECK: [[FOR_BODY_PREHEADER]]:
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[BLOCK_SIZE]], -1
; CHECK-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP1]], i32 3)
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 3, [[UMIN]]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]:
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]])
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br i1 [[TMP2]], label %[[IF_THEN:.*]], label %[[IF_END4]]
; CHECK: [[IF_THEN]]:
; CHECK-NEXT: call void @llvm.ubsantrap(i8 1)
; CHECK-NEXT: unreachable
; CHECK: [[IF_END4]]:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP4:%.*]] = xor i8 [[TMP3]], 54
; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: store i8 [[TMP4]], ptr [[ARRAYIDX7]], align 1
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]]
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]]
;
entry:
%foo_arr = alloca [2 x i8], align 16
%bar_arr = alloca [2 x i8], align 16
call void @x(ptr nonnull %foo_arr)
%cmp14.not = icmp eq i32 %block_size, 0
br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader
for.body.preheader: ; preds = %entry
br label %for.body
for.cond.cleanup.loopexit: ; preds = %if.end4
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
call void @x(ptr nonnull %bar_arr)
ret void
for.body: ; preds = %for.body.preheader, %if.end4
%i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ]
%cmp1 = icmp samesign ugt i32 %i.015, 2
br i1 %cmp1, label %if.then, label %if.end4
if.then: ; preds = %for.body
call void @llvm.ubsantrap(i8 1)
unreachable
if.end4: ; preds = %for.body
%arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015
%0 = load i8, ptr %arrayidx, align 1
%1 = xor i8 %0, 54
%arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015
store i8 %1, ptr %arrayidx7, align 1
%inc = add nuw nsw i32 %i.015, 1
%cmp = icmp ult i32 %inc, %block_size
br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}
; The guard calls @noreturn_with_argmem(%foo_arr), declared with attributes
; #1 (cold noreturn nounwind memory(argmem: read)) — unlike trap it may read
; through its pointer argument. The CHECK lines show the guard compare stays
; in-loop: indvars does NOT treat this like the trap case.
define void @no_optimize_arbitrary_call(i32 %block_size) {
; CHECK-LABEL: define void @no_optimize_arbitrary_call(
; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]])
; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0
; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]]
; CHECK: [[FOR_BODY_PREHEADER]]:
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]:
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]])
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[CMP1:%.*]] = icmp samesign ugt i32 [[I_015]], 2
; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[IF_END4]]
; CHECK: [[IF_THEN]]:
; CHECK-NEXT: call void @noreturn_with_argmem(ptr [[FOO_ARR]])
; CHECK-NEXT: unreachable
; CHECK: [[IF_END4]]:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP4:%.*]] = xor i8 [[TMP3]], 54
; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: store i8 [[TMP4]], ptr [[ARRAYIDX7]], align 1
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]]
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]]
;
entry:
%foo_arr = alloca [2 x i8], align 16
%bar_arr = alloca [2 x i8], align 16
call void @x(ptr nonnull %foo_arr)
%cmp14.not = icmp eq i32 %block_size, 0
br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader
for.body.preheader: ; preds = %entry
br label %for.body
for.cond.cleanup.loopexit: ; preds = %if.end4
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
call void @x(ptr nonnull %bar_arr)
ret void
for.body: ; preds = %for.body.preheader, %if.end4
%i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ]
%cmp1 = icmp samesign ugt i32 %i.015, 2
br i1 %cmp1, label %if.then, label %if.end4
if.then: ; preds = %for.body
call void @noreturn_with_argmem(ptr %foo_arr)
unreachable
if.end4: ; preds = %for.body
%arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015
%0 = load i8, ptr %arrayidx, align 1
%1 = xor i8 %0, 54
%arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015
store i8 %1, ptr %arrayidx7, align 1
%inc = add nuw nsw i32 %i.015, 1
%cmp = icmp ult i32 %inc, %block_size
br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}
; The loop has a second exit (via @pred()) taken BEFORE the trap guard is
; evaluated, so the guard condition does not hold on every path through the
; loop. The CHECK lines show the guard compare stays in-loop — not optimized.
define void @no_optimize_two_exits(i32 %block_size) {
; CHECK-LABEL: define void @no_optimize_two_exits(
; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]])
; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0
; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]]
; CHECK: [[FOR_BODY_PREHEADER]]:
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]:
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]])
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[P:%.*]] = call i1 @pred()
; CHECK-NEXT: br i1 [[P]], label %[[FOR_BODY_CONT:.*]], label %[[FOR_COND_CLEANUP_LOOPEXIT]]
; CHECK: [[FOR_BODY_CONT]]:
; CHECK-NEXT: [[CMP1:%.*]] = icmp samesign ugt i32 [[I_015]], 2
; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[IF_END4]]
; CHECK: [[IF_THEN]]:
; CHECK-NEXT: call void @noreturn(ptr [[FOO_ARR]])
; CHECK-NEXT: unreachable
; CHECK: [[IF_END4]]:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[TMP0]], 54
; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: store i8 [[TMP1]], ptr [[ARRAYIDX7]], align 1
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]]
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]]
;
entry:
%foo_arr = alloca [2 x i8], align 16
%bar_arr = alloca [2 x i8], align 16
call void @x(ptr nonnull %foo_arr)
%cmp14.not = icmp eq i32 %block_size, 0
br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader
for.body.preheader: ; preds = %entry
br label %for.body
for.cond.cleanup.loopexit: ; preds = %for.body, %if.end4
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
call void @x(ptr nonnull %bar_arr)
ret void
for.body: ; preds = %for.body.preheader, %if.end4
%i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ]
%p = call i1 @pred()
br i1 %p, label %for.body.cont, label %for.cond.cleanup.loopexit
for.body.cont: ; preds = %for.body
%cmp1 = icmp samesign ugt i32 %i.015, 2
br i1 %cmp1, label %if.then, label %if.end4
if.then: ; preds = %for.body.cont
call void @noreturn(ptr %foo_arr)
unreachable
if.end4: ; preds = %for.body.cont
%arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015
%0 = load i8, ptr %arrayidx, align 1
%1 = xor i8 %0, 54
%arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015
store i8 %1, ptr %arrayidx7, align 1
%inc = add nuw nsw i32 %i.015, 1
%cmp = icmp ult i32 %inc, %block_size
br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}
; As @no_optimize_two_exits, but the second exit (via @pred()) sits AFTER
; the trap guard, between the guard and the loop body. The CHECK lines show
; the guard compare stays in-loop — not optimized.
define void @no_optimize_two_exits2(i32 %block_size) {
; CHECK-LABEL: define void @no_optimize_two_exits2(
; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]])
; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0
; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]]
; CHECK: [[FOR_BODY_PREHEADER]]:
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]:
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]])
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[CMP1:%.*]] = icmp samesign ugt i32 [[I_015]], 2
; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[FOR_BODY_CONT:.*]]
; CHECK: [[FOR_BODY_CONT]]:
; CHECK-NEXT: [[P:%.*]] = call i1 @pred()
; CHECK-NEXT: br i1 [[P]], label %[[IF_END4]], label %[[FOR_COND_CLEANUP_LOOPEXIT]]
; CHECK: [[IF_THEN]]:
; CHECK-NEXT: call void @noreturn(ptr [[FOO_ARR]])
; CHECK-NEXT: unreachable
; CHECK: [[IF_END4]]:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[TMP0]], 54
; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: store i8 [[TMP1]], ptr [[ARRAYIDX7]], align 1
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]]
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]]
;
entry:
%foo_arr = alloca [2 x i8], align 16
%bar_arr = alloca [2 x i8], align 16
call void @x(ptr nonnull %foo_arr)
%cmp14.not = icmp eq i32 %block_size, 0
br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader
for.body.preheader: ; preds = %entry
br label %for.body
for.cond.cleanup.loopexit: ; preds = %for.body.cont, %if.end4
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
call void @x(ptr nonnull %bar_arr)
ret void
for.body: ; preds = %for.body.preheader, %if.end4
%i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ]
%cmp1 = icmp samesign ugt i32 %i.015, 2
br i1 %cmp1, label %if.then, label %for.body.cont
for.body.cont: ; preds = %for.body
%p = call i1 @pred()
br i1 %p, label %if.end4, label %for.cond.cleanup.loopexit
if.then: ; preds = %for.body
call void @noreturn(ptr %foo_arr)
unreachable
if.end4: ; preds = %for.body.cont
%arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015
%0 = load i8, ptr %arrayidx, align 1
%1 = xor i8 %0, 54
%arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015
store i8 %1, ptr %arrayidx7, align 1
%inc = add nuw nsw i32 %i.015, 1
%cmp = icmp ult i32 %inc, %block_size
br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}
; The noreturn call on the guard path takes the induction variable itself
; (@noreturn_with_i32(i32 %i.015)), so the call is loop-variant. The CHECK
; lines show the guard compare stays in-loop and an LCSSA phi is inserted
; for the IV use — not optimized.
; (Fixed identifier typo: "depdendent" -> "dependent".)
define void @no_optimize_dependent_ubsan_trap(i32 %block_size) {
; CHECK-LABEL: define void @no_optimize_dependent_ubsan_trap(
; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]])
; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0
; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]]
; CHECK: [[FOR_BODY_PREHEADER]]:
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]:
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]])
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[CMP1:%.*]] = icmp samesign ugt i32 [[I_015]], 2
; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[IF_END4]]
; CHECK: [[IF_THEN]]:
; CHECK-NEXT: [[I_015_LCSSA:%.*]] = phi i32 [ [[I_015]], %[[FOR_BODY]] ]
; CHECK-NEXT: call void @noreturn_with_i32(i32 [[I_015_LCSSA]])
; CHECK-NEXT: unreachable
; CHECK: [[IF_END4]]:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[TMP0]], 54
; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: store i8 [[TMP1]], ptr [[ARRAYIDX7]], align 1
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]]
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]]
;
entry:
%foo_arr = alloca [2 x i8], align 16
%bar_arr = alloca [2 x i8], align 16
call void @x(ptr nonnull %foo_arr)
%cmp14.not = icmp eq i32 %block_size, 0
br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader
for.body.preheader: ; preds = %entry
br label %for.body
for.cond.cleanup.loopexit: ; preds = %if.end4
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
call void @x(ptr nonnull %bar_arr)
ret void
for.body: ; preds = %for.body.preheader, %if.end4
%i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ]
%cmp1 = icmp samesign ugt i32 %i.015, 2
br i1 %cmp1, label %if.then, label %if.end4
if.then: ; preds = %for.body
call void @noreturn_with_i32(i32 %i.015)
unreachable
if.end4: ; preds = %for.body
%arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015
%0 = load i8, ptr %arrayidx, align 1
%1 = xor i8 %0, 54
%arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015
store i8 %1, ptr %arrayidx7, align 1
%inc = add nuw nsw i32 %i.015, 1
%cmp = icmp ult i32 %inc, %block_size
br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}
; The guard path loads from %foo_arr and feeds the loaded byte to the
; noreturn callee — the trap path has a memory dependence on the loop's
; data. The CHECK lines show the guard compare stays in-loop — not
; optimized.
; (Fixed identifier typo: "depdendent" -> "dependent".)
define void @no_optimize_dependent_load_trap(i32 %block_size) {
; CHECK-LABEL: define void @no_optimize_dependent_load_trap(
; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16
; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]])
; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0
; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]]
; CHECK: [[FOR_BODY_PREHEADER]]:
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]:
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]])
; CHECK-NEXT: ret void
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[CMP1:%.*]] = icmp samesign ugt i32 [[I_015]], 2
; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[IF_END4]]
; CHECK: [[IF_THEN]]:
; CHECK-NEXT: [[I_015_LCSSA:%.*]] = load i8, ptr [[FOO_ARR]], align 1
; CHECK-NEXT: call void @noreturn_with_i8(i8 [[I_015_LCSSA]])
; CHECK-NEXT: unreachable
; CHECK: [[IF_END4]]:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[TMP0]], 54
; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]]
; CHECK-NEXT: store i8 [[TMP1]], ptr [[ARRAYIDX7]], align 1
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]]
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]]
;
entry:
%foo_arr = alloca [2 x i8], align 16
%bar_arr = alloca [2 x i8], align 16
call void @x(ptr nonnull %foo_arr)
%cmp14.not = icmp eq i32 %block_size, 0
br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader
for.body.preheader: ; preds = %entry
br label %for.body
for.cond.cleanup.loopexit: ; preds = %if.end4
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
call void @x(ptr nonnull %bar_arr)
ret void
for.body: ; preds = %for.body.preheader, %if.end4
%i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ]
%cmp1 = icmp samesign ugt i32 %i.015, 2
br i1 %cmp1, label %if.then, label %if.end4
if.then: ; preds = %for.body
%r = load i8, ptr %foo_arr, align 1
call void @noreturn_with_i8(i8 %r)
unreachable
if.end4: ; preds = %for.body
%arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015
%0 = load i8, ptr %arrayidx, align 1
%1 = xor i8 %0, 54
%arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015
store i8 %1, ptr %arrayidx7, align 1
%inc = add nuw nsw i32 %i.015, 1
%cmp = icmp ult i32 %inc, %block_size
br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}
; @x: opaque external used to escape the allocas and keep them live.
declare void @x(ptr noundef) local_unnamed_addr
; @pred: opaque predicate used to create the extra loop exits.
declare i1 @pred() local_unnamed_addr
; Trap-like callees: #0 = noreturn, only writes inaccessible memory.
declare void @llvm.trap() #0
declare void @noreturn(ptr) #0
declare void @noreturn_with_i32(i32) #0
declare void @noreturn_with_i8(i8) #0
; #1 differs from #0: the callee may read through its pointer argument.
declare void @noreturn_with_argmem(ptr) #1
attributes #0 = { cold noreturn nounwind memory(inaccessiblemem: write) }
attributes #1 = { cold noreturn nounwind memory(argmem: read) }