; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -mtriple=aarch64-none-elf -loop-reduce -lsr-preferred-addressing-mode=all < %s | FileCheck %s
define i32 @postindex_loop(ptr %p, i64 %n) {
; CHECK-LABEL: define i32 @postindex_loop(
; CHECK-SAME: ptr [[P:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[SCEVGEP:%.*]], %[[FOR_BODY]] ], [ [[P]], %[[ENTRY]] ]
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[N]], %[[ENTRY]] ]
; CHECK-NEXT: [[RET:%.*]] = phi i32 [ [[ADD:%.*]], %[[FOR_BODY]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[LSR_IV1]], align 4
; CHECK-NEXT: [[ADD]] = add i32 [[RET]], [[VAL]]
; CHECK-NEXT: [[LSR_IV_NEXT]] = add i64 [[LSR_IV]], -1
; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[LSR_IV_NEXT]], 0
; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT:.*]], label %[[FOR_BODY]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret i32 [[ADD]]
;
entry:
br label %for.body
for.body:
%idx = phi i64 [ %idx.next, %for.body ], [ 0, %entry ]
%ret = phi i32 [ %add, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx
%val = load i32, ptr %arrayidx, align 4
%add = add i32 %ret, %val
%idx.next = add nuw nsw i64 %idx, 1
%exitcond = icmp eq i64 %idx.next, %n
br i1 %exitcond, label %exit, label %for.body
exit:
ret i32 %add
}
; Preindex saves a setup instruction before the loop compared to postindex.
; FIXME: We currently don't recognize that preindex is possible here.
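;
; A rough illustration (hand-written, not compiler output) of the two forms on
; AArch64, assuming the pointer lives in x0 and the loaded value goes in w1:
;   postindex needs the initial offset applied before the loop:
;     add  x0, x0, #4          ; setup outside the loop
;   loop:
;     ldr  w1, [x0], #4        ; load from x0, then x0 += 4
;   preindex folds the increment into the load, so no setup is needed:
;   loop:
;     ldr  w1, [x0, #4]!       ; x0 += 4, then load from x0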
define i32 @preindex_loop(ptr %p, i64 %n) {
; CHECK-LABEL: define i32 @preindex_loop(
; CHECK-SAME: ptr [[P:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr nuw i8, ptr [[P]], i64 4
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[SCEVGEP2:%.*]], %[[FOR_BODY]] ], [ [[SCEVGEP]], %[[ENTRY]] ]
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[N]], %[[ENTRY]] ]
; CHECK-NEXT: [[RET:%.*]] = phi i32 [ [[ADD:%.*]], %[[FOR_BODY]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[LSR_IV1]], align 4
; CHECK-NEXT: [[ADD]] = add i32 [[RET]], [[VAL]]
; CHECK-NEXT: [[LSR_IV_NEXT]] = add i64 [[LSR_IV]], -1
; CHECK-NEXT: [[SCEVGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[LSR_IV_NEXT]], 0
; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT:.*]], label %[[FOR_BODY]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret i32 [[ADD]]
;
entry:
br label %for.body
for.body:
%idx = phi i64 [ %idx.next, %for.body ], [ 0, %entry ]
%ret = phi i32 [ %add, %for.body ], [ 0, %entry ]
%idx.next = add nuw nsw i64 %idx, 1
%arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx.next
%val = load i32, ptr %arrayidx, align 4
%add = add i32 %ret, %val
%exitcond = icmp eq i64 %idx.next, %n
br i1 %exitcond, label %exit, label %for.body
exit:
ret i32 %add
}
; We should use offset addressing here as postindex uses an extra register.
; FIXME: We currently use postindex as we don't realize that the load of val2 is
; also a use of p, which requires p to stay live in the loop.
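;
; A rough sketch of the register usage (hand-written, assuming p is in x0 and
; the index in x1): with offset addressing both loads can address off x0
; directly,
;     ldr  x2, [x0, x1, lsl #3]   ; val1 = p[idx]
;     ldr  x3, [x0, x2, lsl #3]   ; val2 = p[val1]
; whereas postindex advances a separate copy of the pointer each iteration while
; x0 must also stay live for the val2 load, costing an extra register.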
define i64 @offset_loop(ptr %p, i64 %n) {
; CHECK-LABEL: define i64 @offset_loop(
; CHECK-SAME: ptr [[P:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], %[[FOR_BODY]] ], [ [[P]], %[[ENTRY]] ]
; CHECK-NEXT: [[RET:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IDX_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[VAL1:%.*]] = load i64, ptr [[LSR_IV]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i64, ptr [[P]], i64 [[VAL1]]
; CHECK-NEXT: [[VAL2:%.*]] = load i64, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[ADD]] = add i64 [[VAL2]], [[RET]]
; CHECK-NEXT: [[IDX_NEXT]] = add nuw nsw i64 [[IDX]], 1
; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV]], i64 8
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[IDX_NEXT]], [[VAL1]]
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_END:.*]], label %[[FOR_BODY]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: ret i64 [[ADD]]
;
entry:
br label %for.body
for.body:
%ret = phi i64 [ 0, %entry ], [ %add, %for.body ]
%idx = phi i64 [ 0, %entry ], [ %idx.next, %for.body ]
%arrayidx1 = getelementptr inbounds nuw i64, ptr %p, i64 %idx
%val1 = load i64, ptr %arrayidx1, align 4
%arrayidx2 = getelementptr inbounds nuw i64, ptr %p, i64 %val1
%val2 = load i64, ptr %arrayidx2, align 4
%add = add i64 %val2, %ret
%idx.next = add nuw nsw i64 %idx, 1
%cmp = icmp eq i64 %idx.next, %val1
br i1 %cmp, label %for.end, label %for.body
for.end:
ret i64 %add
}
; We can't use postindex addressing on the conditional load of qval and can't
; convert the loop condition to a compare with zero, so we should instead use
; offset addressing.
; FIXME: Currently we don't notice that the load of qval is conditional, and
; attempt postindex addressing anyway.
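;
; A rough sketch of why postindex is wrong here (hand-written, assuming q's
; pointer is in x1 and the index in x2): postindex ties the pointer update to
; the load, which only runs when pval is nonzero,
;     ldr  w3, [x1], #4           ; skipped iterations leave x1 unincremented
; while offset addressing has no writeback, so the conditional load can simply
; index off q:
;     ldr  w3, [x1, x2, lsl #2]   ; qval = q[idx]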
define i32 @conditional_load(ptr %p, ptr %q, ptr %n) {
; CHECK-LABEL: define i32 @conditional_load(
; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], ptr [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[SCEVGEP2:%.*]], %[[FOR_INC:.*]] ], [ [[P]], %[[ENTRY]] ]
; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], %[[FOR_INC]] ], [ [[Q]], %[[ENTRY]] ]
; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ [[IDX_NEXT:%.*]], %[[FOR_INC]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[RET:%.*]] = phi i32 [ [[RET_NEXT:%.*]], %[[FOR_INC]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[PVAL:%.*]] = load i32, ptr [[LSR_IV1]], align 4
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[PVAL]], 0
; CHECK-NEXT: [[SCEVGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label %[[FOR_INC]], label %[[IF_THEN:.*]]
; CHECK: [[IF_THEN]]:
; CHECK-NEXT: [[QVAL:%.*]] = load i32, ptr [[LSR_IV]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[RET]], [[QVAL]]
; CHECK-NEXT: br label %[[FOR_INC]]
; CHECK: [[FOR_INC]]:
; CHECK-NEXT: [[RET_NEXT]] = phi i32 [ [[ADD]], %[[IF_THEN]] ], [ [[RET]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[IDX_NEXT]] = add nuw nsw i64 [[IDX]], 1
; CHECK-NEXT: [[NVAL:%.*]] = load volatile i64, ptr [[N]], align 8
; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV]], i64 4
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[IDX_NEXT]], [[NVAL]]
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret i32 [[RET_NEXT]]
;
entry:
br label %for.body
for.body:
%idx = phi i64 [ %idx.next, %for.inc ], [ 0, %entry ]
%ret = phi i32 [ %ret.next, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds nuw i32, ptr %p, i64 %idx
%pval = load i32, ptr %arrayidx, align 4
%tobool.not = icmp eq i32 %pval, 0
br i1 %tobool.not, label %for.inc, label %if.then
if.then:
%arrayidx1 = getelementptr inbounds nuw i32, ptr %q, i64 %idx
%qval = load i32, ptr %arrayidx1, align 4
%add = add i32 %ret, %qval
br label %for.inc
for.inc:
%ret.next = phi i32 [ %add, %if.then ], [ %ret, %for.body ]
%idx.next = add nuw nsw i64 %idx, 1
%nval = load volatile i64, ptr %n, align 8
%cmp = icmp slt i64 %idx.next, %nval
br i1 %cmp, label %for.body, label %exit
exit:
ret i32 %ret.next
}