; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --include-generated-funcs
; RUN: opt -S -passes=verify,iroutliner -ir-outlining-no-cost < %s | FileCheck %s
; Here we have multiple exits where the same outputs are needed from different
; sources. This checks that the outputs are compressed and moved into the
; appropriate output blocks.
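;
; Both functions contain the same outlinable region (block_2 through block_5)
; with two exits, block_6 and block_7, each of which needs a different set of
; values computed inside the region.
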
define void @outline_outputs1() #0 {
entry:
  %output = alloca i32, align 4
  %result = alloca i32, align 4
  %output2 = alloca i32, align 4
  %result2 = alloca i32, align 4
  %a = alloca i32, align 4
  %b = alloca i32, align 4
  br label %block_2

block_1:
  %a2 = alloca i32, align 4
  %b2 = alloca i32, align 4
  br label %block_2

block_2:
  %a2val = load i32, ptr %a
  %b2val = load i32, ptr %b
  %add2 = add i32 2, %a2val
  %mul2 = mul i32 2, %b2val
  br label %block_5

block_3:
  %aval = load i32, ptr %a
  %bval = load i32, ptr %b
  %add = add i32 2, %aval
  %mul = mul i32 2, %bval
  br label %block_4

block_4:
  store i32 %add, ptr %output, align 4
  store i32 %mul, ptr %result, align 4
  br label %block_6

block_5:
  store i32 %add2, ptr %output, align 4
  store i32 %mul2, ptr %result, align 4
  br label %block_7

block_6:
  %div = udiv i32 %aval, %bval
  ret void

block_7:
  %sub = sub i32 %a2val, %b2val
  ret void
}
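
; @outline_outputs2 is identical to @outline_outputs1 except that block_4 and
; block_5 branch to the opposite exit blocks (and block_6/block_7 contain the
; swapped computations), so both regions should still be outlined into the
; same function.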
define void @outline_outputs2() #0 {
entry:
  %output = alloca i32, align 4
  %result = alloca i32, align 4
  %output2 = alloca i32, align 4
  %result2 = alloca i32, align 4
  %a = alloca i32, align 4
  %b = alloca i32, align 4
  br label %block_2

block_1:
  %a2 = alloca i32, align 4
  %b2 = alloca i32, align 4
  br label %block_2

block_2:
  %a2val = load i32, ptr %a
  %b2val = load i32, ptr %b
  %add2 = add i32 2, %a2val
  %mul2 = mul i32 2, %b2val
  br label %block_5

block_3:
  %aval = load i32, ptr %a
  %bval = load i32, ptr %b
  %add = add i32 2, %aval
  %mul = mul i32 2, %bval
  br label %block_4

block_4:
  store i32 %add, ptr %output, align 4
  store i32 %mul, ptr %result, align 4
  br label %block_7

block_5:
  store i32 %add2, ptr %output, align 4
  store i32 %mul2, ptr %result, align 4
  br label %block_6

block_6:
  %diff = sub i32 %a2val, %b2val
  ret void

block_7:
  %quot = udiv i32 %aval, %bval
  ret void
}
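
; The assertion blocks below were generated with --include-generated-funcs, so
; the rewritten bodies of both callers are followed by the body of the single
; outlined function they share.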
; CHECK-LABEL: @outline_outputs1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[BVAL_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[AVAL_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B2VAL_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A2VAL_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT2:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT2:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2:%.*]]
; CHECK: block_1:
; CHECK-NEXT: [[A2:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[AVAL_LOC]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[BVAL_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[AVAL_LOC]], ptr [[BVAL_LOC]])
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, ptr [[AVAL_LOC]], align 4
; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, ptr [[BVAL_LOC]], align 4
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[AVAL_LOC]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[BVAL_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BLOCK_6:%.*]], label [[BLOCK_7:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[AVAL_RELOAD]], [[BVAL_RELOAD]]
; CHECK-NEXT: ret void
; CHECK: block_7:
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[A2VAL_RELOAD]], [[B2VAL_RELOAD]]
; CHECK-NEXT: ret void
;
;
; CHECK-LABEL: @outline_outputs2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[BVAL_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[AVAL_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B2VAL_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A2VAL_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT2:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT2:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2:%.*]]
; CHECK: block_1:
; CHECK-NEXT: [[A2:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[AVAL_LOC]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[BVAL_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[AVAL_LOC]], ptr [[BVAL_LOC]])
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, ptr [[AVAL_LOC]], align 4
; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, ptr [[BVAL_LOC]], align 4
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[AVAL_LOC]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[BVAL_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BLOCK_7:%.*]], label [[BLOCK_6:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIFF:%.*]] = sub i32 [[A2VAL_RELOAD]], [[B2VAL_RELOAD]]
; CHECK-NEXT: ret void
; CHECK: block_7:
; CHECK-NEXT: [[QUOT:%.*]] = udiv i32 [[AVAL_RELOAD]], [[BVAL_RELOAD]]
; CHECK-NEXT: ret void
;
;
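; In the extracted function, each exit stub stores the values needed on that
; path (%aval/%bval or %a2val/%b2val) through the extra output pointers, and
; the returned i1 tells the caller which exit block to branch to.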
; CHECK: define internal i1 @outlined_ir_func_0(
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[BLOCK_2_TO_OUTLINE:%.*]]
; CHECK: block_2_to_outline:
; CHECK-NEXT: [[A2VAL:%.*]] = load i32, ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: [[B2VAL:%.*]] = load i32, ptr [[TMP1:%.*]], align 4
; CHECK-NEXT: [[ADD2:%.*]] = add i32 2, [[A2VAL]]
; CHECK-NEXT: [[MUL2:%.*]] = mul i32 2, [[B2VAL]]
; CHECK-NEXT: br label [[BLOCK_5:%.*]]
; CHECK: block_3:
; CHECK-NEXT: [[AVAL:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[BVAL:%.*]] = load i32, ptr [[TMP1]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 2, [[AVAL]]
; CHECK-NEXT: [[MUL:%.*]] = mul i32 2, [[BVAL]]
; CHECK-NEXT: br label [[BLOCK_4:%.*]]
; CHECK: block_4:
; CHECK-NEXT: store i32 [[ADD]], ptr [[TMP2:%.*]], align 4
; CHECK-NEXT: store i32 [[MUL]], ptr [[TMP3:%.*]], align 4
; CHECK-NEXT: br label [[BLOCK_6_EXITSTUB:%.*]]
; CHECK: block_5:
; CHECK-NEXT: store i32 [[ADD2]], ptr [[TMP2]], align 4
; CHECK-NEXT: store i32 [[MUL2]], ptr [[TMP3]], align 4
; CHECK-NEXT: br label [[BLOCK_7_EXITSTUB:%.*]]
; CHECK: block_6.exitStub:
; CHECK-NEXT: store i32 [[AVAL]], ptr [[TMP6:%.*]], align 4
; CHECK-NEXT: store i32 [[BVAL]], ptr [[TMP7:%.*]], align 4
; CHECK-NEXT: ret i1 true
; CHECK: block_7.exitStub:
; CHECK-NEXT: store i32 [[A2VAL]], ptr [[TMP4:%.*]], align 4
; CHECK-NEXT: store i32 [[B2VAL]], ptr [[TMP5:%.*]], align 4
; CHECK-NEXT: ret i1 false
;