| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 |
| ; RUN: opt -passes=loop-vectorize -mtriple=aarch64 -mattr=+crypto -S %s | FileCheck %s |
| |
| declare i64 @llvm.clmul.i64(i64 %a, i64 %b) |
| |
| define void @clmul_loop(ptr %a, ptr %b, ptr %c, i64 %n) { |
| ; CHECK-LABEL: define void @clmul_loop( |
| ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i64 [[N:%.*]]) #[[ATTR1:[0-9]+]] { |
| ; CHECK-NEXT: [[ENTRY:.*]]: |
| ; CHECK-NEXT: [[B3:%.*]] = ptrtoaddr ptr [[B]] to i64 |
| ; CHECK-NEXT: [[A2:%.*]] = ptrtoaddr ptr [[A]] to i64 |
| ; CHECK-NEXT: [[C1:%.*]] = ptrtoaddr ptr [[C]] to i64 |
| ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 6 |
| ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] |
| ; CHECK: [[VECTOR_MEMCHECK]]: |
| ; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[C1]], [[A2]] |
| ; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 32 |
| ; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[C1]], [[B3]] |
| ; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], 32 |
| ; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] |
| ; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] |
| ; CHECK: [[VECTOR_PH]]: |
| ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4 |
| ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] |
| ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] |
| ; CHECK: [[VECTOR_BODY]]: |
| ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] |
| ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[A]], i64 [[INDEX]] |
| ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]] |
| ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]] |
| ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[TMP2]], i64 2 |
| ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP2]], align 8 |
| ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x i64>, ptr [[TMP5]], align 8 |
| ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[TMP3]], i64 2 |
| ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x i64>, ptr [[TMP3]], align 8 |
| ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <2 x i64>, ptr [[TMP6]], align 8 |
| ; CHECK-NEXT: [[TMP7:%.*]] = call <2 x i64> @llvm.clmul.v2i64(<2 x i64> [[WIDE_LOAD]], <2 x i64> [[WIDE_LOAD6]]) |
| ; CHECK-NEXT: [[TMP8:%.*]] = call <2 x i64> @llvm.clmul.v2i64(<2 x i64> [[WIDE_LOAD5]], <2 x i64> [[WIDE_LOAD7]]) |
| ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[TMP4]], i64 2 |
| ; CHECK-NEXT: store <2 x i64> [[TMP7]], ptr [[TMP4]], align 8 |
| ; CHECK-NEXT: store <2 x i64> [[TMP8]], ptr [[TMP9]], align 8 |
| ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 |
| ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] |
| ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] |
| ; CHECK: [[MIDDLE_BLOCK]]: |
| ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] |
| ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_EXIT:.*]], label %[[SCALAR_PH]] |
| ; CHECK: [[SCALAR_PH]]: |
| ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] |
| ; CHECK-NEXT: br label %[[FOR_BODY:.*]] |
| ; CHECK: [[FOR_BODY]]: |
| ; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[FOR_BODY]] ] |
| ; CHECK-NEXT: [[PA:%.*]] = getelementptr i64, ptr [[A]], i64 [[I]] |
| ; CHECK-NEXT: [[PB:%.*]] = getelementptr i64, ptr [[B]], i64 [[I]] |
| ; CHECK-NEXT: [[PC:%.*]] = getelementptr i64, ptr [[C]], i64 [[I]] |
| ; CHECK-NEXT: [[VA:%.*]] = load i64, ptr [[PA]], align 8 |
| ; CHECK-NEXT: [[VB:%.*]] = load i64, ptr [[PB]], align 8 |
| ; CHECK-NEXT: [[R:%.*]] = call i64 @llvm.clmul.i64(i64 [[VA]], i64 [[VB]]) |
| ; CHECK-NEXT: store i64 [[R]], ptr [[PC]], align 8 |
| ; CHECK-NEXT: [[I_NEXT]] = add i64 [[I]], 1 |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[I_NEXT]], [[N]] |
| ; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] |
| ; CHECK: [[FOR_EXIT]]: |
| ; CHECK-NEXT: ret void |
| ; |
| ; Scalar input IR: a unit-stride loop computing c[i] = clmul(a[i], b[i]) |
| ; for i in [0, n). The assertions above are autogenerated (see the NOTE at |
| ; the top of the file) — regenerate them with update_test_checks.py rather |
| ; than hand-editing. They verify that the loop vectorizer widens the |
| ; scalar @llvm.clmul.i64 call to @llvm.clmul.v2i64 with VF=2 and an |
| ; interleave factor of 2 (two vector calls per iteration, induction |
| ; advanced by 4), guarded by runtime pointer-difference alias checks |
| ; against 32 bytes (4 lanes x 8 bytes per i64). |
| entry: |
| br label %for.body |
| |
| for.body: |
| %i = phi i64 [0, %entry], [%i.next, %for.body] |
| |
| %pa = getelementptr i64, ptr %a, i64 %i |
| %pb = getelementptr i64, ptr %b, i64 %i |
| %pc = getelementptr i64, ptr %c, i64 %i |
| |
| %va = load i64, ptr %pa |
| %vb = load i64, ptr %pb |
| |
| ; Carry-less multiply — the operation whose widening is under test. |
| %r = call i64 @llvm.clmul.i64(i64 %va, i64 %vb) |
| |
| store i64 %r, ptr %pc |
| |
| %i.next = add i64 %i, 1 |
| %cmp = icmp eq i64 %i.next, %n |
| br i1 %cmp, label %for.exit, label %for.body |
| |
| for.exit: |
| ret void |
| |
| } |
| ;. |
| ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} |
| ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} |
| ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} |
| ; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]} |
| ;. |