; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes='print<access-info>' -disable-output %s 2>&1 | FileCheck %s

; Test case for https://github.com/llvm/llvm-project/issues/87189.
; It is not safe to vectorize because %indices are loaded in the loop and the
; same indices could be loaded in later iterations.
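; A rough C-style sketch of the loop below, for illustration only (variable
; names and types are assumptions, not taken from the original source):
;   for (uint64_t i = 0; i < N; i++) {
;     uint8_t idx = A[i + off];   // index re-loaded from A every iteration
;     int l = B[idx];
;     B[idx] = l + 1;             // indirect access through the loaded index
;     ((int *)A)[i] = l;          // store into A may change a later index
;   }
; The store into A may overlap a later index load (the Unknown dependence in
; the checks below), and the B accesses depend on the loaded index (the
; IndirectUnsafe dependence).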
define void @B_indices_loaded_in_loop_A_stored(ptr %A, ptr noalias %B, i64 %N, i64 %off) {
; CHECK-LABEL: 'B_indices_loaded_in_loop_A_stored'
; CHECK-NEXT:    loop:
; CHECK-NEXT:      Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
; CHECK-NEXT:  Unsafe indirect dependence.
; CHECK-NEXT:      Dependences:
; CHECK-NEXT:        IndirectUnsafe:
; CHECK-NEXT:            %l = load i32, ptr %gep.B, align 4 ->
; CHECK-NEXT:            store i32 %inc, ptr %gep.B, align 4
; CHECK-EMPTY:
; CHECK-NEXT:        Unknown:
; CHECK-NEXT:            %indices = load i8, ptr %gep.A, align 1 ->
; CHECK-NEXT:            store i32 %l, ptr %gep.C, align 4
; CHECK-EMPTY:
; CHECK-NEXT:      Run-time memory checks:
; CHECK-NEXT:      Grouped accesses:
; CHECK-EMPTY:
; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT:      SCEV assumptions:
; CHECK-EMPTY:
; CHECK-NEXT:      Expressions re-written:
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %iv.off = add nuw nsw i64 %iv, %off
  %gep.A = getelementptr inbounds i8, ptr %A, i64 %iv.off
  %indices = load i8, ptr %gep.A, align 1
  %indices.ext = zext i8 %indices to i64
  %gep.B = getelementptr inbounds i32, ptr %B, i64 %indices.ext
  %l = load i32, ptr %gep.B, align 4
  %inc = add i32 %l, 1
  store i32 %inc, ptr %gep.B, align 4
  %gep.C = getelementptr inbounds i32, ptr %A, i64 %iv
  store i32 %l, ptr %gep.C, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %ec = icmp eq i64 %iv.next, %N
  br i1 %ec, label %exit, label %loop

exit:
  ret void
}

; It is not safe to vectorize because %indices are loaded in the loop and the
; same indices could be loaded in later iterations.
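; A rough C-style sketch of the loop below, for illustration only (variable
; names and types are assumptions, not taken from the original source):
;   for (uint64_t i = 0; i < N; i++) {
;     uint8_t idx = A[i];   // index loaded from A every iteration
;     B[idx] += 1;          // the same idx may repeat across iterations,
;                           // so these B accesses may conflict
;   }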
define void @B_indices_loaded_in_loop_A_not_stored(ptr %A, ptr noalias %B, i64 %N) {
; CHECK-LABEL: 'B_indices_loaded_in_loop_A_not_stored'
; CHECK-NEXT:    loop:
; CHECK-NEXT:      Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
; CHECK-NEXT:  Unsafe indirect dependence.
; CHECK-NEXT:      Dependences:
; CHECK-NEXT:        IndirectUnsafe:
; CHECK-NEXT:            %l = load i32, ptr %gep.B, align 4 ->
; CHECK-NEXT:            store i32 %inc, ptr %gep.B, align 4
; CHECK-EMPTY:
; CHECK-NEXT:      Run-time memory checks:
; CHECK-NEXT:      Grouped accesses:
; CHECK-EMPTY:
; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT:      SCEV assumptions:
; CHECK-EMPTY:
; CHECK-NEXT:      Expressions re-written:
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.A = getelementptr inbounds i8, ptr %A, i64 %iv
  %indices = load i8, ptr %gep.A, align 1
  %indices.ext = zext i8 %indices to i64
  %gep.B = getelementptr inbounds i32, ptr %B, i64 %indices.ext
  %l = load i32, ptr %gep.B, align 4
  %inc = add i32 %l, 1
  store i32 %inc, ptr %gep.B, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %ec = icmp eq i64 %iv.next, %N
  br i1 %ec, label %exit, label %loop

exit:
  ret void
}